@article {Bourja2022994, title = {End-to-End Car Make and Model Classification using Compound Scaling and Transfer Learning}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {13}, number = {5}, year = {2022}, note = {cited By 0}, pages = {994-1001}, abstract = {Recently, Morocco has started to invest in IoT systems to transform our cities into smart cities that will promote economic growth and make life easier for citizens. One of the most vital additions is intelligent transportation systems, which represent the foundation of a smart city. However, the problem often faced in such systems is the recognition of entities, in our case, car makes and models. This paper proposes an approach that identifies makes and models for cars using transfer learning and a workflow that first enhances image quality and quantity by data augmentation and then feeds the newly generated data into a deep learning model with a scaling feature{\textendash}that is, compound scaling. In addition, we developed a web interface using the FLASK API to make real-time predictions. The results obtained were 80\% accuracy, which fine-tuning raised to 90\% on unseen data. Our framework is trained on the commonly used Stanford Cars dataset. {\textcopyright} 2022. International Journal of Advanced Computer Science and Applications. All Rights Reserved.}, keywords = {Application programming interfaces (API), Compound scaling, Deep learning, Economic growths, Economics, End to end, Image enhancement, Intelligent systems, Intelligent transportation systems, Internet of things, IOT, Model classification, Scalings, Smart city, Transfer learning, Vehicle classification}, doi = {10.14569/IJACSA.2022.01305111}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85131410221\&doi=10.14569\%2fIJACSA.2022.01305111\&partnerID=40\&md5=aeb1c5a894ab70005066f491ebf3623c}, author = {Bourja, O. and Maach, A. and Zannouti, Z. and Derrouz, H. and Mekhzoum, H. and Abdelali, H.A. and Thami, R.O.H. and Bourzeix, F.} } @article {Badaoui2022646, title = {Fuzzy Centrality Measures: A Survey}, journal = {Lecture Notes in Networks and Systems}, volume = {505 LNNS}, year = {2022}, note = {cited By 0}, pages = {646-654}, abstract = {Most real-world problems can be pictured as a set of connections and interactions between various entities. Together, these entities create a complex phenomenon investigated in the form of complex networks. Each of the entities in the network plays a particular role in the definition of the structure and the analysis of the studied problem. Several measures of centrality have been proposed in the literature to estimate the contribution and quantify the relevance of network entities. The most influential nodes are defined either locally, via the measurement of their connections with their directly related neighbors, or globally, via the measurement of the importance of their neighbors or their relevance in terms of contribution to the fast propagation of information based on the shortest paths. Due to the incompleteness of real-world data, crisp representations do not adequately describe the problem. Therefore, fuzzy graphs have been proposed to give more realistic representations by taking into account the uncertainties present in the data. This paper presents a state of the art of fuzzy centrality measures, with a focus on studies of urban traffic networks. 
{\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-09176-6_72}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135013682\&doi=10.1007\%2f978-3-031-09176-6_72\&partnerID=40\&md5=20f3763938d5a04e48899262e6cbc4e9}, author = {Badaoui, F.-E. and Boulmakoul, A. and Lbath, A. and Thami, R.O.H. and Cherradi, G. and Karim, L. and Bouziri, A.E.} } @article {Benlakhdar20221599, title = {Statistical modeling of directional data using a robust hierarchical von Mises distribution model: perspectives for wind energy}, journal = {Computational Statistics}, volume = {37}, number = {4}, year = {2022}, note = {cited By 0}, pages = {1599-1619}, abstract = {For describing wind direction, a variety of statistical distributions have been suggested that provide information about the wind regime at a particular location and aid the development of efficient wind energy generation. In this paper, a systematic approach for data classification is presented, putting special emphasis on von Mises mixtures. A von Mises mixture model is broad enough to cover, on the one hand, symmetry and asymmetry and, on the other hand, unimodality and multimodality of circular data. We developed an improved mathematical model of the classical von Mises mixture method that rests on a number of principles which give it its internal coherence and originality. Our hierarchical model of von Mises distributions is flexible enough to precisely model complex directional data sets. We define a new specific expectation{\textendash}maximization (S-EM) algorithm for estimating the parameters of the model. Simulations showed that a satisfactory fit of complex directional data could be obtained (error generally < 1\%). Furthermore, the Bayesian Information Criterion is used to judge the goodness of fit and the suitability of this model versus common distributions found in the literature. The findings show that our hierarchical model of von Mises distributions is relevant for modeling complex directional data with several modes and/or prevailing data directions. {\textcopyright} 2021, The Author(s), under exclusive licence to Springer-Verlag GmbH Germany, part of Springer Nature.}, doi = {10.1007/s00180-021-01173-5}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85122431362\&doi=10.1007\%2fs00180-021-01173-5\&partnerID=40\&md5=1983e074fcfeb1ae4f99796700245614}, author = {Benlakhdar, S. and Rziza, M. and Thami, R.O.H.} } @article {Alaoui2021, title = {Fall detection of elderly people using the manifold of positive semidefinite matrices}, journal = {Journal of Imaging}, volume = {7}, number = {7}, year = {2021}, note = {cited By 3}, abstract = {Falls are one of the most critical health care risks for elderly people, being, in some adverse circumstances, an indirect cause of death. Furthermore, demographic forecasts for the future show a growing elderly population worldwide. In this context, models for automatic fall detection and prediction are of paramount relevance, especially AI applications that use ambient sensors or computer vision. In this paper, we present an approach for fall detection using computer vision techniques. Video sequences of a person in a closed environment are used as inputs to our algorithm. In our approach, we first apply the V2V-PoseNet model to detect the 2D body skeleton in every frame. 
Specifically, our approach involves four steps: (1) the body skeleton is detected by V2V-PoseNet in each frame; (2) the skeleton joints are mapped into the Riemannian manifold of positive semidefinite matrices of fixed-rank 2 to build time-parameterized trajectories; (3) a temporal warping is performed on the trajectories, providing a (dis-)similarity measure between them; (4) finally, a pairwise proximity function SVM is used to classify them into fall or non-fall, incorporating the (dis-)similarity measure into the kernel function. We evaluated our approach on two publicly available datasets, URFD and Charfi. The results of the proposed approach are competitive with respect to state-of-the-art methods, while only involving 2D body skeletons. {\textcopyright} 2021 by the authors. Licensee MDPI, Basel, Switzerland.}, keywords = {Causes of death, Computer vision, Dynamic time warping, Elderly people, Elderly populations, Fall detection, Gram matrices, Health care, Health risks, Healthcare, Musculoskeletal system, Positive semidefinite matrices, Riemann Manifold, Similarity measure, Support vector machines}, doi = {10.3390/jimaging7070109}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111011519\&doi=10.3390\%2fjimaging7070109\&partnerID=40\&md5=fe18155309e5ff7e5a54dff191d8c05e}, author = {Alaoui, A.Y. and Tabii, Y. and Thami, R.O.H. and Daoudi, M. and Berretti, S. and Pala, P.} } @conference {Badaoui202112, title = {Fuzzy Dynamic Centrality for Urban Traffic Resilience}, booktitle = {2021 International Conference on Data Analytics for Business and Industry, ICDABI 2021}, year = {2021}, note = {cited By 1}, pages = {12-16}, abstract = {When traveling to and from home, work or vacation, we have all experienced traffic delays. As a result, road travelers often add time to their trip in advance to anticipate traffic jams. Meanwhile, unplanned traffic jams, caused by a variety of reasons such as road closures, events, or severe weather, can be fundamentally problematic for travelers. In this study, we shed light on the importance of travel time in the detection of traffic congestion. We propose a fuzzy process to measure the uncertainty in the computation of travel time in order to develop a fuzzy dynamic centrality for the detection of traffic congestion. A simulation is conducted on a small area of Mohammedia city using the microscopic traffic simulator SUMO. The calculation of the uncertainty of the estimated travel time over the study period is based on the generation of Gaussian fuzzy numbers. Our proposed study is an essential component of smart traffic monitoring and provides genuine assistance for urban network resilience. {\textcopyright} 2021 IEEE.}, keywords = {Fuzzy dynamic centrality, Fuzzy rules, Fuzzy travel time, Gaussian fuzzy numbers, Home work, Intelligent systems, Intelligent transportation systems, Intelligent vehicle highway systems, Motor transportation, Resilient networks, Roads and streets, Street traffic control, Traffic congestion, Traffic jams, Travel time, Travel-time, Uncertainty, Uncertainty analysis, Urban traffic, Vehicle actuated signals}, doi = {10.1109/ICDABI53623.2021.9655939}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85124647354\&doi=10.1109\%2fICDABI53623.2021.9655939\&partnerID=40\&md5=cff82ecaec7b839ed7c2803a46e25b18}, author = {Badaoui, F.-E. and Boulmakoul, A. 
and Thami, R.O.H.} } @article {Abdelali2021164282, title = {Multiple hypothesis detection and tracking using deep learning for video traffic surveillance}, journal = {IEEE Access}, volume = {9}, year = {2021}, note = {cited By 3}, pages = {164282-164291}, abstract = {The Moroccan Intelligent Transport System is the first Moroccan system that uses the latest advances in computer vision, machine learning and deep learning techniques to manage Moroccan traffic and road violations. In this paper, we propose a fully automatic approach to Multiple Hypothesis Detection and Tracking (MHDT) for video traffic surveillance. The proposed framework combines Kalman filtering and data association-based tracking methods with the YOLO detection approach to robustly track vehicles in complex traffic surveillance scenes. Experimental results demonstrate that the proposed approach is robust in detecting and tracking the trajectories of vehicles in different situations such as scale variation, stopped vehicles, rotation, varying illumination and occlusion. The proposed approach shows competitive results (detection: 94.10\% accuracy, tracking: 92.50\% accuracy) compared to state-of-the-art approaches. {\textcopyright} 2021 Institute of Electrical and Electronics Engineers Inc. All rights reserved.}, keywords = {Bandpass filters, Computer vision, Convolutional neural network, Data association, Deep learning, Deep neural networks, Detection, Intelligent systems, Intelligent vehicle highway systems, Kalman filters, Monitoring, Multiple hypothesis tracking, Object detection, Occlusion handling, Predictive models, Security systems, Target tracking, Targets tracking, Traffic control, Traffic surveillance, Vehicles, Video sequences}, doi = {10.1109/ACCESS.2021.3133529}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85121370415\&doi=10.1109\%2fACCESS.2021.3133529\&partnerID=40\&md5=84b2a2537c16a7db00901d13c8b6eb5c}, author = {Abdelali, H.A.I.T. and Derrouz, H. and Zennayi, Y. and Thami, R.O.H. and Bourzeix, F.} } @article {Bourja2021915, title = {Real Time Vehicle Detection, Tracking, and Inter-vehicle Distance Estimation based on Stereovision and Deep Learning using Yolov3}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {12}, number = {8}, year = {2021}, note = {cited By 1}, pages = {915-923}, abstract = {In this paper, we propose a robust real-time vehicle tracking and inter-vehicle distance estimation algorithm based on stereovision. Traffic images are captured by a stereoscopic system installed on the road, and then we detect moving vehicles with the YOLO V3 Deep Neural Network algorithm. The real-time video then goes through an algorithm for stereoscopy-based measurement in order to estimate the distance between detected vehicles. However, detecting objects in real time has always been a challenging task because of occlusion, scale, illumination, etc. Thus, many convolutional neural network models for object detection have been developed in recent years, but they cannot be used for real-time object analysis because of their slow recognition speed. The model that currently performs best is the unified object detection model You Only Look Once (YOLO). However, in our experiments we found that, despite having very good detection precision, YOLO still has some limitations. YOLO processes every image separately, even in a continuous video or sequence of frames. Because of this, much important identification can be lost. 
Therefore, after vehicle detection and tracking, inter-vehicle distance estimation is performed. {\textcopyright} 2021. International Journal of Advanced Computer Science and Applications. All Rights Reserved.}, keywords = {Bounding-box, Convolution, Convolutional neural network, Convolutional neural networks, Deep neural networks, Distance estimation, Estimation algorithm, Object detection, Object recognition, Real- time, Stereo image processing, Stereoimages, Stereovision, Tracking, Vehicles, Vehicles detection, YOLOv3 deep neural network}, doi = {10.14569/IJACSA.2021.01208101}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85118990281\&doi=10.14569\%2fIJACSA.2021.01208101\&partnerID=40\&md5=225926d9445a4fed55a125a55e519b2f}, author = {Bourja, O. and Derrouz, H. and Abdelali, H.A. and Maach, A. and Thami, R.O.H. and Bourzeix, F.} } @article {Iazzi2020247, title = {Efficient fall activity recognition by combining shape and motion features}, journal = {Computational Visual Media}, volume = {6}, number = {3}, year = {2020}, note = {cited By 2}, pages = {247-263}, abstract = {This paper presents a vision-based system for recognizing when elderly adults fall. A fall is characterized by shape deformation and high motion. We represent shape variation using three features: the aspect ratio of the bounding box, the orientation of an ellipse representing the body, and the aspect ratio of the projection histogram. For motion variation, we extract several features from three blocks corresponding to the head, center of the body, and feet using optical flow. For each block, we compute the speed and the direction of motion. Each activity is represented by a feature vector constructed from variations in shape and motion features for a set of frames. A support vector machine is used to classify fall and non-fall activities. Experiments on three different datasets show the effectiveness of our proposed method. {\textcopyright} 2020, The Author(s).}, keywords = {Activity recognition, Aspect ratio, Direction of motion, Feature vectors, Motion variation, Optical flows, Projection histograms, Shape deformation, Shape variations, Support vector machines, Vision based system}, doi = {10.1007/s41095-020-0183-7}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85090864771\&doi=10.1007\%2fs41095-020-0183-7\&partnerID=40\&md5=21fcdf876b091c6b30486427e7cd9bff}, author = {Iazzi, A. and Rziza, M. and Thami, R.O.H.} } @article {Boudad2020243, title = {Exploring the Use of Word Embedding and Deep Learning in Arabic Sentiment Analysis}, journal = {Advances in Intelligent Systems and Computing}, volume = {1105 AISC}, year = {2020}, note = {cited By 3}, pages = {243-253}, abstract = {In the past couple of years, improving Arabic sentiment analysis systems has been one of the important fields of research. There are several challenges and issues facing existing systems, especially handling multiple dialects and feature extraction. Most of those systems are built using linear classification models and traditional bag-of-words features. In this work, we explore the use of word embeddings as a modern feature representation, and Convolutional Neural Networks as a deep neural network, for the sentiment classification of Arabic texts. The application of our model on five benchmark datasets has yielded results that outperform previous works on 4 out of 5 datasets. 
{\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {Arabic texts, Bag of words, Benchmark datasets, Benchmarking, Convolution, Convolutional neural networks, Deep learning, Deep neural networks, Embeddings, Existing systems, Feature representation, Intelligent systems, Linear classification, planning, Sentiment analysis, Sentiment classification, Sustainable development, Word embedding}, doi = {10.1007/978-3-030-36674-2_26}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85080913982\&doi=10.1007\%2f978-3-030-36674-2_26\&partnerID=40\&md5=816d3952c972aa5ce92cb7318fe768aa}, author = {Boudad, N. and Ezzahid, S. and Faizi, R. and Thami, R.O.H.} } @article {Benlakhdar20201659, title = {A robust model using SIFT and gamma mixture model for texture images classification: Perspectives for medical applications}, journal = {Biomedical and Pharmacology Journal}, volume = {13}, number = {4}, year = {2020}, note = {cited By 0}, pages = {1659-1669}, abstract = {The texture analysis of medical images is a powerful computational tool for discriminating between pathological and healthy tissue in different organs. Our paper proposes a novel approach, named GGD-GMM, based on statistical modeling in the wavelet domain to describe texture images. Firstly, we propose a robust algorithm based on the combination of the wavelet transform and the Scale Invariant Feature Transform (SIFT). Secondly, we implement the aforementioned algorithm and fit the result by using the finite Gamma Mixture Model (GMM). The results, obtained for two benchmark datasets, show that our proposed algorithm is highly relevant, as it provides higher classification accuracy compared to some other well-known models. Moreover, it has the additional advantages of being noise-resistant and rotation-invariant. Our algorithm could be useful for the analysis of several medical issues. {\textcopyright} 2020 This is an Open Access article licensed under a Creative Commons license: Attribution 4.0 International (CC-BY). Published by Oriental Scientific Publishing Company}, keywords = {Article, curvelet transform, human, human experiment, noise, rotation, scale invariant feature transform}, doi = {10.13005/BPJ/2041}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85100517546\&doi=10.13005\%2fBPJ\%2f2041\&partnerID=40\&md5=8997e98fae845d62807a15ca1c749766}, author = {Benlakhdar, S. and Rziza, M. and Thami, R.O.H.} } @conference {Iazzi20181, title = {Fall detection based on posture analysis and support vector machine}, booktitle = {2018 4th International Conference on Advanced Technologies for Signal and Image Processing, ATSIP 2018}, year = {2018}, pages = {1-6}, doi = {10.1109/ATSIP.2018.8364462}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048484597\&doi=10.1109\%2fATSIP.2018.8364462\&partnerID=40\&md5=d873ad85d14aac2034c2de8077860920}, author = {Iazzi, A. and Rziza, M. and Thami, R.O.H.} } @conference {Bourja2018502, title = {MoVITS: Moroccan Video Intelligent Transport System}, booktitle = {Colloquium in Information Science and Technology, CIST}, volume = {2018-October}, year = {2018}, pages = {502-507}, doi = {10.1109/CIST.2018.8596566}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85061428090\&doi=10.1109\%2fCIST.2018.8596566\&partnerID=40\&md5=d3b7baffb731883e8897b8cc5356e33e}, author = {Bourja, O. and Kabbaj, K. and Derrouz, H. and El Bouziady, A. and Thami, R.O.H. and Zennayi, Y. 
and Bourzeix, F.} } @conference {Hdioud2018, title = {Trajectories modeling and clustering}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230918}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484322\&doi=10.1145\%2f3230905.3230918\&partnerID=40\&md5=1613a1dada41f3fc707505b5dd9dd690}, author = {Hdioud, B. and Tirari, M.E.H. and Thami, R.O.H.} } @conference {Er-Rady2017, title = {Automatic sign language recognition: A survey}, booktitle = {Proceedings - 3rd International Conference on Advanced Technologies for Signal and Image Processing, ATSIP 2017}, year = {2017}, note = {cited By 0}, abstract = {Sign Language, which is a fully visual language with its own grammar, differs largely from spoken languages [21]. After nearly 30 years of research, SL recognition is still in its infancy when compared to Automatic Speech Recognition. When producing Sign Language (SL), different body parts are involved: most importantly the hands, but also facial expressions and body movements/postures. The recognition of SL is still one of the most challenging problems in gesture recognition. In this survey, we discuss the advancement of sign language recognition through the last decade and provide a review of the state-of-the-art building blocks of an Automatic Sign Language Recognition (ASLR) system, from feature extraction up to sign recognition. {\textcopyright} 2017 IEEE.}, doi = {10.1109/ATSIP.2017.8075561}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85035355057\&doi=10.1109\%2fATSIP.2017.8075561\&partnerID=40\&md5=2dda585a2de8f963b1f1f0cc678e01d3}, author = {Er-Rady, A. and Faizi, R. and Thami, R.O.H. and Housni, H.} } @conference {Benlakhdar2017, title = {Directional data classification using a hierarchical model of von Mises distribution}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {The von Mises distribution (vM-pdf) is a continuous probability distribution on the circle used in directional statistics. A mixture model of von Mises distributions is broad enough to cover symmetry as well as asymmetry, and unimodality as well as multimodality, of circular data. In this paper, we use a hierarchical von Mises mixture distribution model (HmvM-pdf), in which each class is itself considered the result of a mixture of subclasses. The parameters of our model are estimated using a modified expectation-maximization (EM) algorithm. The HmvM-pdf model achieves higher accuracy than the mvM model and offers richer modeling. The suitability of the distributions is judged from the coefficient of determination R$^2$. {\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090425}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028459799\&doi=10.1145\%2f3090354.3090425\&partnerID=40\&md5=ce3dab33a54f11b997bc9f8f24999c78}, author = {Benlakhdar, S. and Rziza, M. and Thami, R.O.H.} } @conference {Abouyahya201746, title = {Features extraction for facial expressions recognition}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {46-49}, abstract = {The recognition of an expression seems obvious and easy when classified by the human brain. 
However, it is clearly difficult for a computer to detect a human face, extract all of the components characterizing the facial expression, and then determine its classification from a single image. Moreover, with videos, the process becomes even more complex because it must simultaneously take into account the temporal and spatial information available. It should also be noted that facial feature extraction is an important factor in developing a robust face representation, because it aims to select the best features and reduce the dimensionality of the feature set by finding a new set that contains most of the facial feature information. For these reasons, this paper presents several feature extraction approaches for facial expression recognition as a state-of-the-art review. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905642}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019146665\&doi=10.1109\%2fICMCS.2016.7905642\&partnerID=40\&md5=9a7be9980a63d7021cf8136fdf05dfd5}, author = {Abouyahya, A. and El Fkihi, S. and Thami, R.O.H. and Aboutajdine, D.} } @conference {Fezeu2017, title = {Safe and irrefutable decentralized communication: Bringing non-repudiation to mesh networks}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {Securing digital communications is a major preoccupation for industry and academia, and a great number of techniques have been successfully developed to achieve that goal. The predominant model for digital communication is one that relies on trusted third parties, either as local servers or as cloud services. Meanwhile, emerging technologies such as the internet-of-things are set to broadcast growing amounts of potentially sensitive information, thereby making centralized architectures problematic for privacy and performance reasons and making decentralized networks ever more relevant. However, these third parties play an important role in securing brokered communications and are essential in providing Authentication and Non-Repudiation according to current models. Clearly, such an approach cannot be used in peer-to-peer networks, and there is a need for a simple model applicable in fully decentralized networks to provide Non-Repudiation. This document proposes such a model, presents an implementation and discusses its application, particularly pertaining to implementing irrefutable trustless transaction mechanisms - similar to blockchain - in the realm of limited resources. {\textcopyright} 2017 Copyright is held by the owner/author(s). Publication rights licensed to ACM.}, doi = {10.1145/3090354.3090392}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028463840\&doi=10.1145\%2f3090354.3090392\&partnerID=40\&md5=00d345b203cd99bd20f6b7401577a263}, author = {Fezeu, H.K. and Djotio, T. and Thami, R.O.H.} } @article {Boudad2017233, title = {Sentiment classification of Arabic tweets: A supervised approach}, journal = {Journal of Mobile Multimedia}, volume = {13}, number = {3-4}, year = {2017}, note = {cited By 0}, pages = {233-243}, abstract = {Social media platforms have proven to be a powerful source of opinion sharing. Thus, mining and analyzing these opinions plays an important role in decision-making and product benchmarking. However, the manual processing of the huge amount of content that these web-based applications host is an arduous task. This has led to the emergence of a new field of research known as Sentiment Analysis. 
In this respect, our objective in this work is to investigate sentiment classification in Arabic tweets using machine learning. Three classifiers, namely Na{\"i}ve Bayes, Support Vector Machine and K-Nearest Neighbor, were evaluated on an in-house developed dataset using different features. A comparison of these classifiers has revealed that Support Vector Machine outperforms the other classifiers and achieves a 78\% accuracy rate. {\textcopyright} Rinton Press.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85040241035\&partnerID=40\&md5=76bc92b38d241173585a11bc74ae14d7}, author = {Boudad, N. and Faizi, R. and Thami, R.O.H. and Chiheb, R.} } @conference {Ezzahout2013, title = {Tracking people through selected blocks using correlation and optimized similarity measure OSSD}, booktitle = {2013 8th International Conference on Intelligent Systems: Theories and Applications, SITA 2013}, year = {2013}, note = {cited By 0}, abstract = {This paper presents the results of a simple and appropriate technique for tracking a moving person in a stream captured and stored in a suitable memory. The method is based on searching for similar blocks only in the next frame, or in a range of frames, of the video sequence in question. The proposed process of tracking people in a single view from one static camera starts, like any video surveillance system, with background subtraction. Then, a Minimal Boundary Rectangle is used to surround the moving people in each frame. A selected block of size 8{\texttimes}8 is used to calculate the values of its corresponding blocks in the following frames of the video sequence. Finally, we compute the similarity measures: the minimum of the Optimized Sum of Squared Differences and the Fast Fourier Transform correlation. The same blocks are correlated in the posterior frames corresponding to the minimum error. {\textcopyright} 2013 IEEE.}, doi = {10.1109/SITA.2013.6560819}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883122358\&doi=10.1109\%2fSITA.2013.6560819\&partnerID=40\&md5=468ac280d95dbcf4a9a4d89921e21168}, author = {Ezzahout, A. and Thami, R.O.H. and Hadi, Y.} } @conference {Ezzahout2012179, title = {Detection evaluation and testing region incoming people{\textquoteright}s in a simple camera view}, booktitle = {2nd International Conference on Innovative Computing Technology, INTECH 2012}, year = {2012}, note = {cited By 0}, pages = {179-183}, doi = {10.1109/INTECH.2012.6457804}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874460739\&doi=10.1109\%2fINTECH.2012.6457804\&partnerID=40\&md5=ac4defc953442c5eec7ef981a25f270f}, author = {Ezzahout, A. and Youssef, H.M. and Thami, R.O.H.} } @article {Tabii2009188, title = {A method for automatic score box detection and text recognition in soccer video}, journal = {International Review on Computers and Software}, volume = {4}, number = {2}, year = {2009}, note = {cited By 2}, pages = {188-191}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-69149098315\&partnerID=40\&md5=d08bba2cf1fcb178ebde7204839bb505}, author = {Tabii, Y. and Thami, R.O.H.} }