@article {Haddad20222115, title = {1{\texttimes}16 Rectangular dielectric resonator antenna array for 24 GHz automotive radar system}, journal = {Bulletin of Electrical Engineering and Informatics}, volume = {11}, number = {4}, year = {2022}, note = {cited By 0}, pages = {2115-2123}, abstract = {This paper presents the design of a 1{\texttimes}16-element RDRA array for anti-collision radar SRR application at 24 GHz. A single RDRA with a high dielectric constant of 41, fed by a simple microstrip line feeding technique, is initially designed to operate around 24 GHz. The RDRA element is further used within an array network structure made up of 16 linear antenna elements to cover the same frequency band. The simulated 1{\texttimes}16 RDRA array can reach a high gain of up to 18.6 dB and a very high radiation efficiency (97\%), and can ensure sufficiently directional radiation pattern properties for radar applications, with a 3-dB angular beam width of 6{\textdegree}. To validate our design, the computed radiation pattern results of the RDRA array are compared to those of an equivalent fabricated patch antenna array reported in the literature. {\textcopyright} 2022, Institute of Advanced Engineering and Science. All rights reserved.}, doi = {10.11591/eei.v11i4.3068}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85133448647\&doi=10.11591\%2feei.v11i4.3068\&partnerID=40\&md5=a0d9425608cbc6d17f3ea7aeb3718bdd}, author = {Haddad, A. and Aoutoul, M. and Essaaidi, M. and Sabri, K. and Khoukh, A. and Errami, Y. and Had, A. and El Moukhtafi, F. and Jouali, R.} } @conference {Toub2022313, title = {Accelerated operating room scheduling using Lagrangian relaxation method and VNS meta-heuristic}, booktitle = {ACM International Conference Proceeding Series}, year = {2022}, note = {cited By 0}, pages = {313-317}, abstract = {Like any business that produces services, the hospital is part of a process of improving the quality of services provided to patients. As part of this, hospitals are faced with the daunting task of planning operating room patients within budget, time, and personnel constraints. Most scheduling problems are NP-hard, so researchers have favored the development of heuristics and meta-heuristics to the detriment of exact methods. In a context where high-performance computers are continuously improving, it is once again interesting to explore exact methods. Here we focus on developing exact methods for solving the operating room planning and scheduling problem. Our contribution is to first develop an accelerated Integer Linear Program (ILP) using the Variable Neighborhood Search (VNS) meta-heuristic to optimize patient waiting time according to the priority of their surgeries. Afterwards, we present a new lower bound obtained by optimizing the relaxed patient waiting time. The experimental results validated the performance of the accelerated ILP in comparison with the original ILP. Furthermore, we have shown that the Lagrangian relaxation of the original ILP produces a lower bound of good quality. 
{\textcopyright} 2022 ACM.}, keywords = {Budget control, Healthcare, Heuristic algorithms, Heuristic methods, Integer linear programs, Integer programming, Lagrange multipliers, Lagrangian relaxations, Low bound, Meta-heuristic., Metaheuristic, Operating rooms, Operation research, Optimisations, Scheduling, Surgery, Variable neighborhood search}, doi = {10.1145/3529836.3529928}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85133432791\&doi=10.1145\%2f3529836.3529928\&partnerID=40\&md5=b98b5eb1fa5eebe63a438dcf238538d7}, author = {Toub, M. and Achchab, S. and Souissi, O.} } @article {Asaad2022, title = {AsthmaKGxE: An asthma{\textendash}environment interaction knowledge graph leveraging public databases and scientific literature}, journal = {Computers in Biology and Medicine}, volume = {148}, year = {2022}, note = {cited By 1}, abstract = {Motivation: Asthma is a complex heterogeneous disease resulting from intricate interactions between genetic and non-genetic factors related to environmental and psychosocial aspects. Discovery of such interactions can provide insights into the pathophysiology and etiology of asthma. In this paper, we propose an asthma knowledge graph (KG) built using a hybrid methodology for graph-based modeling of asthma complexity with a focus on environmental interactions. Using a heterogeneous set of public sources, we construct a genetic and pharmacogenetic asthma knowledge graph. The construction of this KG allowed us to shed more light on the lack of curated resources focused on environmental influences related to asthma. To remedy the lack of environmental data in our KG, we exploit the biomedical literature using state-of-the-art natural language processing and construct the first Asthma{\textendash}Environment interaction catalog incorporating a continuously updated ensemble of environmental, psychological, nutritional and socio-economic influences. The catalog{\textquoteright}s most substantiated results are then integrated into the KG. Results: The resulting environmentally rich knowledge graph {\textquotedblright}AsthmaKGxE{\textquotedblright} aims to provide a resource for several potential applications of artificial intelligence and allows for a multi-perspective study of asthma. Our insight extraction results indicate that stress is the most frequent asthma association in the corpus, followed by allergens and obesity. We contend that studying asthma{\textendash}environment interactions in more depth holds the key to curbing the complexity and heterogeneity of asthma. Availability: A user interface to browse and download the extracted catalog as well as the KG are available at http://asthmakgxe.moreair.info/. The code and supplementary data are available on github (https://github.com/ChaiAsaad/MoreAIRAsthmaKGxE). 
{\textcopyright} 2022 Elsevier Ltd}, keywords = {allergen, Article, Artificial intelligence, Association reactions, asthma, Automated, automated pattern recognition, data base, data extraction, Databases, Diseases, environmental factor, Factual, factual database, Gene-Environment Interaction, Genetic factors, genotype environment interaction, Graphic methods, Heterogeneous disease, human, Humans, Knowledge graph, Knowledge graphs, Knowledge management, Language processing, Learning algorithms, Machine learning, Machine-learning, NAtural language processing, Natural language processing systems, Natural languages, nutritional assessment, obesity, pathophysiology, Pattern recognition, pharmacogenetics, physiological stress, psychological aspect, Public database, Scientific literature, socioeconomics, User interfaces}, doi = {10.1016/j.compbiomed.2022.105933}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135702024\&doi=10.1016\%2fj.compbiomed.2022.105933\&partnerID=40\&md5=2022f55e1de0bbaa947ba6699af6a143}, author = {Asaad, C. and Ghogho, M.} } @article {Slimani20224243, title = {Automated machine learning: the new data science challenge}, journal = {International Journal of Electrical and Computer Engineering}, volume = {12}, number = {4}, year = {2022}, note = {cited By 0}, pages = {4243-4252}, abstract = {The world is changing quite rapidly while increasingly tuning into digitalization. However, it is important to note that data science is what most technology is evolving around and data is definitely the future of everything. For industries, adopting a {\textquotedblleft}data science approach{\textquotedblright} is no longer an option, it becomes an obligation in order to enhance their business rather than survive. This paper offers a roadmap for anyone interested in this research field or getting started with {\textquotedblleft}machine learning{\textquotedblright} learning while enabling the reader to easily comprehend the key concepts behind. Indeed, it examines the benefits of automated machine learning systems, starting with defining machine learning vocabulary and basic concepts. Then, explaining how to, concretely, build up a machine learning model by highlighting the challenges related to data and algorithms. Finally, exposing a summary of two studies applying machine learning in two different fields, namely transportation for road traffic forecasting and supply chain management for demand prediction where the predictive performance of various models is compared based on different metrics. {\textcopyright} 2022 Institute of Advanced Engineering and Science. All rights reserved.}, doi = {10.11591/ijece.v12i4.pp4243-4252}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85129666214\&doi=10.11591\%2fijece.v12i4.pp4243-4252\&partnerID=40\&md5=1d5f93103617370f5a92a90901bfe815}, author = {Slimani, I. and Slimani, N. and Achchab, S. and Saber, M. and Farissi, I.E. and Sbiti, N. and Amghar, M.} } @article {Ajallouda2022440, title = {Automatic Key-Phrase Extraction: Empirical Study of Graph-Based Methods}, journal = {Lecture Notes in Networks and Systems}, volume = {489 LNNS}, year = {2022}, note = {cited By 0}, pages = {440-456}, abstract = {Key-phrases in a document are phrases that provide a high-level description of its content without reading it completely. In some research articles, authors specify key-phrases in the articles they have written. However, the vast majority of books, articles, and web pages published every day, lack key-phrases. 
The manual extraction of these phrases is a tedious and time-consuming task. For this reason, automatic key-phrase extraction (AKE), which is an area of Text Mining, remains the best solution to overcome these difficulties, especially because key-phrases are used in many Natural Language Processing (NLP) applications, such as text summarization and text classification. This article presents a comparison of some methods of extracting key-phrases from documents, especially graph-based approaches, which are evaluated by their ability to extract key-phrases. Our work focuses on the study of the performance of these methods in extracting key-phrases, whether from short or long texts, with the aim of providing information that contributes to improving their efficiency. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-07969-6_33}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135019633\&doi=10.1007\%2f978-3-031-07969-6_33\&partnerID=40\&md5=399dd65be1f33d58c24c1d31777d2b87}, author = {Ajallouda, L. and Fagroud, F.Z. and Zellou, A. and Benlahmar, E.H.} } @article {Zarnoufi2022223, title = {Classical Machine Learning vs Deep Learning for Detecting Cyber-Violence in Social Media}, journal = {Communications in Computer and Information Science}, volume = {1577 CCIS}, year = {2022}, note = {cited By 0}, pages = {223-235}, abstract = {Cyber-violence is a widely addressed problem in e-health research; its focus is the detection of harmful behavior in online user-generated text in order to prevent it and protect victims. In this work, we tackle the problem of Social Media (SM) text analysis to detect the harmful content that is the common characteristic of cyber-violence acts. For that, we use classical Machine Learning (ML) based on user psychological features, which we compare with Deep Learning (DL) techniques in a small dataset setting. The results were in favor of classical ML. The findings highlight that psychological characteristics extracted from user-generated text are strong predictors of their harmful behavior. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, keywords = {Behavioral research, Classical machine learning, Cybe-violence, Deep learning, E health, Ehealth, Feature engineerings, Harmful behavior, Machine-learning, social media, Social networking (online), User-generated}, doi = {10.1007/978-3-031-04447-2_15}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85128968964\&doi=10.1007\%2f978-3-031-04447-2_15\&partnerID=40\&md5=70577a9ac5b248762e97757da4349369}, author = {Zarnoufi, R. and Abik, M.} } @article {Alimoussa2022, title = {Compact Hybrid Multi-Color Space Descriptor Using Clustering-Based Feature Selection for Texture Classification}, journal = {Journal of Imaging}, volume = {8}, number = {8}, year = {2022}, note = {cited By 0}, abstract = {Color texture classification aims to recognize patterns by analyzing their colors and their textures. This process requires using descriptors to represent and discriminate the different texture classes. In most traditional approaches, these descriptors are used with a predefined setting of their parameters and computed from images coded in a chosen color space. The prior choice of a color space, a descriptor, and its setting suited to a given application is a crucial but difficult problem that strongly impacts the classification results. 
To overcome this problem, this paper proposes a color texture representation that simultaneously takes into account the properties of several settings from different descriptors computed from images coded in multiple color spaces. Since the number of color texture features generated from this representation is high, a dimensionality reduction scheme by clustering-based sequential feature selection is applied to provide a compact hybrid multi-color space (CHMCS) descriptor. The experimental results carried out on five benchmark color texture databases with five color spaces and manifold settings of two texture descriptors show that combining different configurations always improves the accuracy compared to a predetermined configuration. On average, the CHMCS representation achieves 94.16\% accuracy and outperforms deep learning networks and handcrafted color texture descriptors by over 5\%, especially when the dataset is small. {\textcopyright} 2022 by the authors.}, doi = {10.3390/jimaging8080217}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85136887352\&doi=10.3390\%2fjimaging8080217\&partnerID=40\&md5=22239b1c87f0cb6fec0b966689303d2b}, author = {Alimoussa, M. and Porebski, A. and Vandenbroucke, N. and El Fkihi, S. and Oulad Haj Thami, R.} } @article {Porebski2022115, title = {Comparison of color imaging vs. hyperspectral imaging for texture classification}, journal = {Pattern Recognition Letters}, volume = {161}, year = {2022}, note = {cited By 0}, pages = {115-121}, abstract = {Many approaches of texture analysis by color or hyperspectral imaging are based on the assumption that the image of a texture can be viewed as a multi-component image, where spatial interactions within and between components are jointly considered (opponent component approach) or not (marginal approach). When color images are coded in multiple color spaces, texture descriptors are based on Multi Color Channel (MCC) representations. By extension, a Multi Spectral Band (MSB) representation can be used to characterize the texture of material surfaces in hyperspectral images. MSB and MCC representations are compared in this paper for texture classification issues. The contribution of each representation is investigated with marginal and/or opponent component strategies. For this purpose, several relevant texture descriptors are considered. Since MSB and MCC representations generate high-dimensional feature spaces, a dimensionality reduction is applied to avoid the curse of dimensionality. Experimental results carried out on three hyperspectral texture databases (HyTexiLa, SpecTex and an original dataset extracted from the Timbers database) show that considering between component interactions in addition to the within ones significantly improves the classification accuracies. The proposed approaches allow also to outperform state of the art hand-designed descriptors and color texture descriptors based on deep learning networks. This study highlights the contribution of hyperspectral imaging compared to color imaging for texture classification purposes but also the advantages of color imaging depending on the considered texture representation. 
{\textcopyright} 2022}, keywords = {Channel representation, Classification (of information), Color, Color channels, Color imaging, Deep learning, Features selection, Hyperspectral imaging, Image texture, Multi-colors, Multi-spectral, Spectral band, Spectroscopy, Texture classification, Texture descriptors, Texture representation, Textures}, doi = {10.1016/j.patrec.2022.08.001}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135883794\&doi=10.1016\%2fj.patrec.2022.08.001\&partnerID=40\&md5=e0ae6847684c79691a71e29e7b2c273b}, author = {Porebski, A. and Alimoussa, M. and Vandenbroucke, N.} } @article {Adil2022, title = {COVID-19-Related Scientific Literature Exploration: Short Survey and Comparative Study}, journal = {Biology}, volume = {11}, number = {8}, year = {2022}, note = {cited By 1}, abstract = {The urgency of the COVID-19 pandemic caused a surge in the related scientific literature. This surge made the manual exploration of scientific articles time-consuming and inefficient. Therefore, a range of exploratory search applications have been created to facilitate access to the available literature. In this survey, we give a short description of certain efforts in this direction and explore the different approaches that they used. {\textcopyright} 2022 by the authors.}, doi = {10.3390/biology11081221}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85137364249\&doi=10.3390\%2fbiology11081221\&partnerID=40\&md5=1602d312118c2b7347846773ddd73594}, author = {Adil, B. and Lhazmir, S. and Ghogho, M. and Benbrahim, H.} } @book {AlAfandy2022127, title = {Deep learning}, series = {Approaches and Applications of Deep Learning in Virtual Medical Care}, year = {2022}, note = {cited By 0}, pages = {127-166}, abstract = {This chapter provides a comprehensive explanation of deep learning including an introduction to ANNs, improving the deep NNs, CNNs, classic networks, and some technical tricks for image classification using deep learning. ANNs, mathematical models for one node ANN, and multi-layers/multi-nodes ANNs are explained followed by the ANNs training algorithm followed by the loss function, the cost function, the activation function with its derivatives, and the back-propagation algorithm. This chapter also outlines the most common training problems with the most common solutions and ANNs improvements. CNNs are explained in this chapter with the convolution filters, pooling filters, stride, padding, and the CNNs mathematical models. This chapter explains the four most commonly used classic networks and ends with some technical tricks that can be used in CNNs model training. {\textcopyright} 2022 by IGI Global.}, doi = {10.4018/978-1-7998-8929-8.ch006}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85139678781\&doi=10.4018\%2f978-1-7998-8929-8.ch006\&partnerID=40\&md5=7ccf9fb39c61262036843028b0c808a6}, author = {Al Afandy, K.A. and Omara, H. and Lazaar, M. and Al Achhab, M.} } @article {ElAlaoui2022435, title = {Deep Stacked Ensemble for Breast Cancer Diagnosis}, journal = {Lecture Notes in Networks and Systems}, volume = {468 LNNS}, year = {2022}, note = {cited By 1}, pages = {435-445}, abstract = {Breast cancer is considered one of the major public health issues and a leading cause of death among women in the world. Its early diagnosis can significantly help to increase the chances of survival rate. Therefore, this study proposes a deep stacking ensemble technique for binary classification of breast histopathological images over the BreakHis dataset. 
Initially, to form the base learners of the deep stacking ensemble, we trained seven deep learning (DL) techniques based on pre-trained VGG16, VGG19, ResNet50, Inception_V3, Inception_ResNet_V2, Xception, and MobileNet with a 5-fold cross-validation method. Then, a meta-model was built, a logistic regression algorithm that learns how to best combine the predictions of the base learners. Furthermore, to evaluate and compare the performance of the proposed technique, we used: (1) four classification performance criteria (accuracy, precision, recall, and F1-score), and (2) Scott Knott (SK) statistical test to cluster and identify the outperforming models. Results showed the potential of the stacked deep learning techniques to classify breast cancer images into malignant or benign tumor. The proposed deep stacking ensemble reports an overall accuracy of 93.8\%, 93.0\%, 93.3\%, and 91.8\% over the four magnification factors (MF) values of the BreakHis dataset: 40X, 100X, 200X and 400X, respectively. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-04826-5_44}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130273664\&doi=10.1007\%2f978-3-031-04826-5_44\&partnerID=40\&md5=4246b5750ce0b8a03e4a80001cdfc5e5}, author = {El Alaoui, O. and Zerouaoui, H. and Idri, A.} } @article {Didi2022427, title = {Design of a Security System Based on Raspberry Pi with Motion Detection}, journal = {Lecture Notes in Networks and Systems}, volume = {455 LNNS}, year = {2022}, note = {cited By 2}, pages = {427-434}, abstract = {Currently, the integration of IoT technology in various fields is very widely used, however, data security remains the essential point to be monitored especially in companies, and also in homes. To control and overcome security-related problems, we adopted Internet of Things technology based on a Raspberry pi4 as the main data processing element in this study. In this paper, we present a simple, efficient, and very reliable study for the monitoring of a video stream coming from a camera installed on a Raspberry pi4 which constitutes the essential element in our project. To reproduce this realization, we did not use a motion sensor, but we took advantage of the algorithm advantages of the Motion software integrated into the free operating system MotionEyeOs on a Raspberry pi4 to trigger motion detection by causing a beep to draw attention. On the other hand, our study was implemented without noticed difficulty, and with a great level of performance and stability which shows that our realization of the Video Stream Surveillance System is successful. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-02447-4_44}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130364762\&doi=10.1007\%2f978-3-031-02447-4_44\&partnerID=40\&md5=b04857d3a313f1841a7b503bccd21ff8}, author = {Didi, Z. and El Azami, I. and Boumait, E.M.} } @conference {Ajallouda2022, title = {Doc2Vec, SBERT, InferSent, and USE Which embedding technique for noun phrases?}, booktitle = {2022 2nd International Conference on Innovative Research in Applied Science, Engineering and Technology, IRASET 2022}, year = {2022}, note = {cited By 4}, abstract = {Phrase embedding is a technique of representing phrases in vector space. A very high effort has been made to develop this technique to improve tasking in various natural language processing (NLP) applications. 
The evaluation of phrase embedding has been presented in many studies, but most of them focused on the intrinsic or extrinsic evaluation process regardless of the type of phrase (noun phrases, verb phrases, etc.). In the literature, there is no study evaluating the embedding of noun phrases, even though this type is used by many NLP applications, such as automatic key-phrase extraction (AKE), information retrieval, and question answering. In this article, we present an empirical study comparing the most common phrase embedding techniques to determine the most suitable one for representing noun phrases. The dataset used in the comparison process consists of noun phrases from the Inspec and SemEval2010 datasets, to which we have added their manually defined synonyms. {\textcopyright} 2022 IEEE.}, keywords = {Embedding technique, Embeddings, Empirical studies, Key-phrases extractions, Natural language processing applications, Natural language processing systems, Noun phrase, Phrase embedding technique, Question Answering, Vector spaces}, doi = {10.1109/IRASET52964.2022.9738300}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85127985676\&doi=10.1109\%2fIRASET52964.2022.9738300\&partnerID=40\&md5=f274393a28334ba6d338a5dc4e3b5f79}, author = {Ajallouda, L. and Najmani, K. and Zellou, A. and Benlahmar, E.H.} } @article {Abdelhafid2022186, title = {ECG Arrhythmia Classification Using Convolutional Neural Network}, journal = {International Journal of Emerging Technology and Advanced Engineering}, volume = {12}, number = {7}, year = {2022}, note = {cited By 0}, pages = {186-195}, abstract = {This study provides a thorough analysis of earlier DL techniques used to classify ECG data. The large variability among individual patients and the high expense of labeling clinical ECG records are the main hurdles in automatically detecting arrhythmia by electrocardiogram (ECG). The classification of electrocardiogram (ECG) arrhythmias using a novel and more effective technique is presented in this research. A high-performance electrocardiogram (ECG)-based arrhythmic beat classification system is described in this research to develop a plan with an autonomous feature learning strategy and an effective optimization mechanism, based on the ECG heartbeat classification approach. We propose a method based on an efficient 12-layer convolutional neural network, the MIT-BIH Arrhythmia dataset{\textquoteright}s five micro-classes of heartbeat types, and the wavelet denoising technique. Compared to state-of-the-art approaches, the newly presented strategy enables a considerable accuracy increase with quicker online retraining and less professional involvement. {\textcopyright} 2022 IJETAE Publication House. All rights reserved.}, doi = {10.46338/ijetae0722_19}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135368671\&doi=10.46338\%2fijetae0722_19\&partnerID=40\&md5=ffb72f915dc01aa63fb482762a1edeb3}, author = {Abdelhafid, E. and Aymane, E. and Benayad, N. and Abdelalim, S. and El, Y.A.M.H. and Rachid, O.H.T. 
and Brahim, B.} } @article {Douch202269264, title = {Edge Computing Technology Enablers: A Systematic Lecture Study}, journal = {IEEE Access}, volume = {10}, year = {2022}, note = {cited By 1}, pages = {69264-69302}, abstract = {With the increasing stringent QoS constraints (e.g., latency, bandwidth, jitter) imposed by novel applications (e.g., e-Health, autonomous vehicles, smart cities, etc.), as well as the rapidly increasing number of connected IoT (Internet of Things) devices, the core network is becoming increasingly congested. To cope with those constraints, Edge Computing (EC) is emerging as an innovative computing paradigm that leverages Cloud computing and brings it closer to the customer. {\textquoteright}EC{\textquoteright} refers to transferring computing power and intelligence from the central Cloud to the network{\textquoteright}s Edge. With that, EC promotes the idea of processing and caching data at the Edge, thus reducing network congestion and latency. This paper presents a detailed, thorough, and well-structured assessment of Edge Computing and its enabling technologies. Initially, we start by defining EC from the ground up, outlining its architectures and evolution from Cloudlets to Multi-Access Edge Computing. Next, we survey recent studies on the main cornerstones of an EC system, including resource management, computation offloading, data management, network management, etc. Besides, we emphasized EC technology enablers, starting with Edge Intelligence, the branch of Artificial Intelligence (AI) that integrates AI models at resource-constrained edge nodes with significant heterogeneity and mobility. Then, moving on to 5G and its empowering technologies, we explored how EC and 5G complement each other. After that, we studied virtualization and containerization as promising hosting runtime for edge applications. Further to that, we delineated a variety of EC use-case scenarios, e.g., smart cities, e-Health, military applications, etc. Finally, we concluded our survey by highlighting the role of EC integration with future concerns regarding green energy and standardization. {\textcopyright} 2013 IEEE.}, keywords = {5g, 5g mobile communication, 5G mobile communication systems, Cloud-computing, computation offloading, Containerization, Containers, Data handling, Edge computing, Edge intelligence, Fog computing, Information management, Internet of things, License, Military applications, Mobile communications, Multi-access edge computing, Multiaccess, Quality of service, Quality-of-service, Surveys}, doi = {10.1109/ACCESS.2022.3183634}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85132750947\&doi=10.1109\%2fACCESS.2022.3183634\&partnerID=40\&md5=88667ef450dd686759983878427de051}, author = {Douch, S. and Abid, M.R. and Zine-Dine, K. and Bouzidi, D. and Benhaddou, D.} } @article {AlAfandy2022739, title = {Efficient Classification of Remote Sensing Images Using Two Convolution Channels and SVM}, journal = {Computers, Materials and Continua}, volume = {72}, number = {1}, year = {2022}, note = {cited By 0}, pages = {739-753}, abstract = {Remote sensing image processing engaged researchers{\textquoteright} attentiveness in recent years, especially classification. The main problem in classification is the ratio of the correct predictions after training. Feature extraction is the foremost important step to build high-performance image classifiers. 
Convolutional neural networks can extract image features that significantly improve the image classifiers{\textquoteright} accuracy. This paper proposes two efficient approaches for remote sensing image classification that utilize the concatenation of two convolution channels{\textquoteright} outputs for feature extraction using two classic convolution models; these convolution models are the ResNet 50 and the DenseNet 169. These extracted features are then used as input features by a fully connected neural network classifier and a support vector machine classifier. The results of the proposed methods are compared with other antecedent approaches in the same experimental environments. Evaluation is based on learning curves plotted during the training of the proposed classifier that is based on a fully connected neural network, and on measuring the overall accuracy of both proposed classifiers. The proposed classifiers are used with their trained weights to predict the classes of a large remote sensing scene in a developed test. Experimental results confirm that, compared with the other traditional classifiers, the proposed classifiers are more accurate. {\textcopyright} 2022 Tech Science Press. All rights reserved.}, keywords = {Classification (of information), Classification of remote sensing image, Convolution, Convolution model, Deep learning, Densenet, Extraction, Features extraction, Fully connected neural network, Image classification, Image Classifiers, Image enhancement, Remote sensing, Remote sensing images, Resnet, Support vector machines, SVM}, doi = {10.32604/cmc.2022.022457}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85125387732\&doi=10.32604\%2fcmc.2022.022457\&partnerID=40\&md5=d49f68f82164e31e29c0276c95a3c3ca}, author = {AlAfandy, K.A. and Omara, H. and El-Sayed, H.S. and Baz, M. and Lazaar, M. and Faragallah, O.S. and Al Achhab, M.} } @article {Bourja2022994, title = {End-to-End Car Make and Model Classification using Compound Scaling and Transfer Learning}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {13}, number = {5}, year = {2022}, note = {cited By 0}, pages = {994-1001}, abstract = {Recently, Morocco has started to invest in IoT systems to transform our cities into smart cities that will promote economic growth and make life easier for citizens. One of the most vital additions is intelligent transportation systems, which represent the foundation of a smart city. However, the problem often faced in such systems is the recognition of entities, in our case, car makes and models. This paper proposes an approach that identifies makes and models for cars using transfer learning and a workflow that first enhances image quality and quantity by data augmentation and then feeds the newly generated data into a deep learning model with a scaling feature{\textendash}that is, compound scaling. In addition, we developed a web interface using the FLASK API to make real-time predictions. The results obtained were 80\% accuracy, improved by fine-tuning to an accuracy rate of 90\% on unseen data. Our framework is trained on the commonly used Stanford Cars dataset. {\textcopyright} 2022. International Journal of Advanced Computer Science and Applications. 
All Rights Reserved.}, keywords = {Application programming interfaces (API), Compound scaling, Deep learning, Economic growths, Economics, End to end, Image enhancement, Intelligent systems, Intelligent transportation systems, Internet of things, IOT, Model classification, Scalings, Smart city, Transfer learning, Vehicle classification}, doi = {10.14569/IJACSA.2022.01305111}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85131410221\&doi=10.14569\%2fIJACSA.2022.01305111\&partnerID=40\&md5=aeb1c5a894ab70005066f491ebf3623c}, author = {Bourja, O. and Maach, A. and Zannouti, Z. and Derrouz, H. and Mekhzoum, H. and Abdelali, H.A. and Thami, R.O.H. and Bourzeix, F.} } @article {Derrouz20225561, title = {End-to-end quantum-inspired method for vehicle classification based on video stream}, journal = {Neural Computing and Applications}, volume = {34}, number = {7}, year = {2022}, note = {cited By 1}, pages = {5561-5576}, abstract = {Intelligent Transportation Systems (ITS) are the most widely used systems for road traffic management. The vehicle type classification (VTC) is a crucial ITS task due to its capability to gather valuable traffic information. However, designing a performant VTC method is challenging due to the considerable intra-class variation of vehicles. This paper presents a new quantum decision-based method for VTC applied to video streaming. This method allows for earlier decision-making by considering a few stream{\textquoteright}s images. Our method is threefold. First, the video stream is acquired and preprocessed following a specific pipeline. Second, we aim to detect and track vehicles. Therefore, we apply a deep learning-based model to detect vehicles, and then a vehicle tracking algorithm is used to track each detected vehicle. Third, we seek to classify the tracked vehicle according to six defined classes. Furthermore, we transform the tracked vehicles according to a pipeline, consisting of the histogram of oriented gradients (HOG), and principal component analysis (PCA) methods. Then, we estimate the vehicles{\textquoteright} probabilities of belonging to each class by training multilayer perceptron (MLP) classifier with the resulting features. To assign a class to a vehicle, we apply a quantum-inspired probability integrator that handles each frame{\textquoteright}s information flow. The unique characteristics of the work we propose, compared to the existing ones, are expressed in the decision-making process, since the former requires a sequence of frames of different sizes, compared to the image-based-decision made by the other methods. Our method outperformed the baseline methods with an accuracy up to 96\%. {\textcopyright} 2021, The Author(s), under exclusive licence to Springer-Verlag London Ltd., part of Springer Nature.}, keywords = {Classification (of information), Decision making, Decisions makings, Deep learning, Early decision, Early decision making, End to end, Intelligent systems, Intelligent transportation systems, Intelligent vehicle highway systems, Pipelines, Principal component analysis, Quantum inspired algorithm, Tracked vehicles, Type classifications, Vehicle classification, Vehicle type classification, Vehicle types, Video streaming}, doi = {10.1007/s00521-021-06718-9}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85122251939\&doi=10.1007\%2fs00521-021-06718-9\&partnerID=40\&md5=8e1d19359f8245b2f82d8558670d4ba1}, author = {Derrouz, H. and Cabri, A. and Ait Abdelali, H. and Oulad Haj Thami, R. and Bourzeix, F. 
and Rovetta, S. and Masulli, F.} } @article {Daoui2022, title = {Environmental, Climatic, and Parasite Molecular Factors Impacting the Incidence of Cutaneous Leishmaniasis Due to Leishmania tropica in Three Moroccan Foci}, journal = {Microorganisms}, volume = {10}, number = {9}, year = {2022}, note = {cited By 0}, abstract = {{Cutaneous leishmaniasis (CL) occurring due to Leishmania tropica is a public health problem in Morocco. The distribution and incidence of this form of leishmaniasis have increased in an unusual way in the last decade, and the control measures put in place are struggling to slow down the epidemic. This study was designed to assess the impact of climatic and environmental factors on CL in L. tropica foci. The data collected included CL incidence and climatic and environmental factors across three Moroccan foci (Foum Jemaa, Imintanout, and Ouazzane) from 2000 to 2019. Statistical analyses were performed using the linear regression model. An association was found between the occurrence of CL in Imintanout and temperature and humidity (r2 = 0.6076}, doi = {10.3390/microorganisms10091712}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85138700280\&doi=10.3390\%2fmicroorganisms10091712\&partnerID=40\&md5=515be27b75ee2affaf29fb221f2aefe8}, author = {Daoui, O. and Bennaid, H. and Kbaich, M.A. and Mhaidi, I. and Aderdour, N. and Rhinane, H. and Bouhout, S. and Akarid, K. and Lemrani, M.} } @article {Ahajjam2022, title = {Experimental investigation of variational mode decomposition and deep learning for short-term multi-horizon residential electric load forecasting}, journal = {Applied Energy}, volume = {326}, year = {2022}, note = {cited By 0}, abstract = {With the booming growth of advanced digital technologies, it has become possible for users as well as distributors of energy to obtain detailed and timely information about the electricity consumption of households. These technologies can also be used to forecast the household{\textquoteright}s electricity consumption (a.k.a. the load). In this paper, Variational Mode Decomposition and deep learning techniques are investigated as a way to improve the accuracy of the load forecasting problem. Although this problem has been studied in the literature, selecting an appropriate decomposition level and a deep learning technique providing better forecasting performance have garnered comparatively less attention. This study bridges this gap by studying the effect of six decomposition levels and five distinct deep learning networks. The raw load profiles are first decomposed into intrinsic mode functions using the Variational Mode Decomposition in order to mitigate their non-stationary aspect. Then, day, hour, and past electricity consumption data are fed as a three-dimensional input sequence to a four-level Wavelet Decomposition Network model. Finally, the forecast sequences related to the different intrinsic mode functions are combined to form the aggregate forecast sequence. The proposed method was assessed using load profiles of five Moroccan households from the Moroccan buildings{\textquoteright} electricity consumption dataset (MORED) and was benchmarked against state-of-the-art time-series models and a baseline persistence model. 
{\textcopyright} 2022 Elsevier Ltd}, keywords = {data set, decomposition, Decomposition level, Deep learning, Electric load forecasting, Electric power plant loads, Electric power utilization, electricity, electricity supply, Electricity-consumption, Experimental investigations, forecasting method, Housing, Intrinsic mode functions, learning, Learning algorithms, Learning systems, Learning techniques, Load profiles, Multi-horizon forecasting, Residential load forecasting, Short-term residential load forecasting, Variational mode decomposition, Wavelet decomposition}, doi = {10.1016/j.apenergy.2022.119963}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85138343844\&doi=10.1016\%2fj.apenergy.2022.119963\&partnerID=40\&md5=322ce14ee40d2de7bbdfa72a4aabc718}, author = {Ahajjam, M.A. and Bonilla Licea, D. and Ghogho, M. and Kobbane, A.} } @article {Radouane20221460, title = {Fusion of Gabor filter and steerable pyramid to improve iris recognition system}, journal = {IAES International Journal of Artificial Intelligence}, volume = {11}, number = {4}, year = {2022}, note = {cited By 0}, pages = {1460-1468}, abstract = {Iris recognition system is a technique of identifying people using their distinctive features. Generally, this technique is used in security, because it offers a good reliability. Different researchers have proposed new methods for iris recognition system to increase its effectiveness. In this paper, we propose a new method for iris recognition based on Gabor filter and steerable pyramid decomposition. It{\textquoteright}s an efficient and accurate linear multi-scale, multi-orientation image decomposition to capture texture details of an image. At first, the iris image is segmented, normalized and decomposed by Gabor filter and steerable pyramid method. Multiple sub-band are generated by applying steerable pyramid on the input image. High frequency sub-band is ignored to eliminate noise and increase the accuracy. The method was validated using Chinese Academy of Sciences Institute of Automation (CASIA-v4), Indian Institute of Technology Delhi (IITD) and University of Phoenix Online (UPOL) databases. The performance of the proposed method is better than the most methods in the literature. The proposed algorithm provides accuracy of 99.99\%. False acceptance rate (FAR), equal error rate (EER) and genuine acceptance rate (GAR) have also been improved. {\textcopyright} 2022, Institute of Advanced Engineering and Science. All rights reserved.}, doi = {10.11591/ijai.v11.i4.pp1460-1468}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85136268629\&doi=10.11591\%2fijai.v11.i4.pp1460-1468\&partnerID=40\&md5=54ad763c812d3874d3c1e97ff0cfc8f8}, author = {Radouane, M. and Zouggari, N.I. and Amraoui, A. and Amraoui, M.} } @conference {Rassam202278, title = {Fuzzy Logic-based N-gram Graph Technique for Evaluating Textual Documents Indexes}, booktitle = {2022 4th International Conference on Computer Communication and the Internet, ICCCI 2022}, year = {2022}, note = {cited By 0}, pages = {78-82}, abstract = {Fuzzy logic is a term that is used more and more often by researchers specialized in information retrieval and indexing. It is supposed to fill the gaps of the current domain by adding meaning to the information. 
In this article, the main goal is to introduce the fuzzy logic approach into the field of corpus representation, more precisely through the proposal of a new method based on fuzzy functions that allows the evaluation of the indexes generated for a specific corpus. The evaluation is based on calculating the degrees of relevance of a term in relation to another, whether they appear in the same document of the corpus or in two or more completely different documents of the same corpus. {\textcopyright} 2022 IEEE.}, keywords = {{\textquoteright}current, Computer circuits, Degree of relevance, Fuzzy function, Fuzzy logic, Fuzzy logic approach, Fuzzy-Logic, Graph technique, Indexing, Indexing (of information), N-grams, Textual documents}, doi = {10.1109/ICCCI55554.2022.9850268}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85137083735\&doi=10.1109\%2fICCCI55554.2022.9850268\&partnerID=40\&md5=3d771186a1a254dd249e3647a64918b2}, author = {Rassam, L. and Aldiebesghanem, C. and Zellou, A. and Lahmar, E.B.} } @article {Bouzbita202248, title = {A Fuzzy Meta Model for Adjusting Ant Colony System Parameters}, journal = {Lecture Notes in Networks and Systems}, volume = {489 LNNS}, year = {2022}, note = {cited By 0}, pages = {48-58}, abstract = {Metaheuristic algorithms have become an important choice for solving complex optimization problems which are difficult to solve by conventional methods. But, like many other metaheuristic algorithms, the ant colony system (ACS) has the problem of parameter setting. In the last few years, different approaches have been proposed to deal with this problem. Recently, the use of fuzzy logic for dynamic parameter adaptation in metaheuristic algorithms has been gaining considerable interest from researchers. In this paper, a meta model for modifying the parameters of ACS during runtime based on the fuzzy logic concept is presented. The main idea is to study the effect of modifying all the parameters of the ACS at the same time on its performance. To compare the efficiency of the proposed approaches, they were applied to a set of traveling salesman problem instances. Also, a comparison with the standard ACS and some literature results is discussed. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-07969-6_4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135092796\&doi=10.1007\%2f978-3-031-07969-6_4\&partnerID=40\&md5=319150216144782de4bcb5697185e658}, author = {Bouzbita, S. and Afia, A.E.} } @article {Ounasser202245, title = {Generative and Autoencoder Models for Large-Scale Multivariate Unsupervised Anomaly Detection}, journal = {Smart Innovation, Systems and Technologies}, volume = {237}, year = {2022}, note = {cited By 0}, pages = {45-58}, abstract = {Anomaly detection is a major problem that has been well studied in various fields of research and fields of application. In this paper, we present several methods that can be built on existing deep learning solutions for unsupervised anomaly detection, so that outliers can be separated from normal data in an efficient manner. We focus on approaches that use generative adversarial networks (GAN) and autoencoders for anomaly detection. By using these deep anomaly detection techniques, we can overcome the problem of needing large-scale anomaly data in the learning phase of a detection system. Thus, we compare various machine learning-based and deep learning anomaly detection methods and their applications in various fields. 
This article uses seven available datasets. We report the results on anomaly detection datasets, using performance metrics, and discuss their performance in finding clustered and low-density anomalies. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd.}, keywords = {Anomaly detection, Auto encoders, Deep learning, Detection system, Generative adversarial networks, ITS applications, Large-scales, Learning phasis, Performance, Performance metrices, Unsupervised anomaly detection}, doi = {10.1007/978-981-16-3637-0_4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85116885193\&doi=10.1007\%2f978-981-16-3637-0_4\&partnerID=40\&md5=d381663c0ba073f5139a00cbfe2819c8}, author = {Ounasser, N. and Rhanoui, M. and Mikram, M. and Asri, B.E.} } @conference {Sabiri2022428, title = {Impact of Hyperparameters on the Generative Adversarial Networks Behavior}, booktitle = {International Conference on Enterprise Information Systems, ICEIS - Proceedings}, volume = {1}, year = {2022}, note = {cited By 0}, pages = {428-438}, abstract = {Generative adversarial networks (GANs) have become a full-fledged branch of the most important neural network models for unsupervised machine learning. A multitude of loss functions have been developed to train the GAN discriminators, and they all have a common structure: a sum of real and fake losses that depend only on the real and generated data, respectively. A challenge associated with an equally weighted sum of two losses is that the training can benefit one loss but harm the other, which we show causes instability and mode collapse. In this article, we introduce a new family of discriminator loss functions that adopts a weighted sum of real and fake parts. By using the gradients of the real and fake parts of the loss, we can adaptively choose weights to train the discriminator in a way that benefits the stability of the GAN model. Our method can potentially be applied to any discriminator model with a loss that is a sum of the real and fake parts. Our method consists of adjusting the hyper-parameters appropriately in order to improve the training of the two antagonistic models. Experiments validated the effectiveness of our loss functions on image generation tasks, improving the baseline results by a significant margin on the Celebdata dataset. Copyright {\textcopyright} 2022 by SCITEPRESS - Science and Technology Publications, Lda. All rights reserved.}, keywords = {Common structures, Computer vision, Deep learning, Generative adversarial networks, Hyper-parameter, Image enhancement, Learning systems, Loss functions, Machine-learning, Network behaviors, Neural network model, Neural-networks, Unsupervised machine learning, Weighted Sum}, doi = {10.5220/0011115100003179}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85140905402\&doi=10.5220\%2f0011115100003179\&partnerID=40\&md5=a465ab3cd8d0650f9e310edf8a94d422}, author = {Sabiri, B. and Asri, B. and Rhanoui, M.} } @article {Miloudi2022191, title = {The Impact of Instance Selection Algorithms on Maintenance Effort Estimation for Open-Source Software}, journal = {Lecture Notes in Networks and Systems}, volume = {470 LNNS}, year = {2022}, note = {cited By 0}, pages = {191-201}, abstract = {Open-source software is widely used nowadays in industry, and the performance of the estimation of its maintenance effort has become an interesting research topic. 
In this context, researchers have conducted many open-source software maintenance effort estimation (O-MEE) studies based on statistical and machine learning (ML) techniques for better estimation. This study focuses on the impact of instance selection on the performance of ML techniques in O-MEE, mainly for bug resolution. An empirical study was conducted using three techniques: K-nearest neighbor (kNN), support vector machine (SVM), and multinomial na{\"\i}ve Bayes (MNB) using all-kNN instance selection algorithms on three datasets: Eclipse JDT, Eclipse Platform, and Mozilla Thunderbird datasets. This study reports on a set of 18 experiments and a comparison of the results. The results of this study show that instance selection helped make ML techniques more performant. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-04829-6_17}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130234489\&doi=10.1007\%2f978-3-031-04829-6_17\&partnerID=40\&md5=7ac0a2d5915327c057f3ec68815ba211}, author = {Miloudi, C. and Cheikhi, L. and Idri, A. and Abran, A.} } @article {Najmani2022556, title = {The Impact of the k-Nearest Neighbor Parameters in Collaborative Filtering Recommender Systems}, journal = {Lecture Notes in Networks and Systems}, volume = {489 LNNS}, year = {2022}, note = {cited By 0}, pages = {556-567}, abstract = {Recommender Systems (RS) have become very important recently, they are a main component of many applications in different fields. They aim to give beneficial information according to the profile of each user among the huge existing online information. RS are based on several approaches to provide the best results and give satisfaction to the active user. Collaborative filtering is one of these approaches. It helps to choose a product according to the consumer{\textquoteright}s preference from many and various choices. It uses the k-Nearest Neighbor (kNN) technique for the extraction of similar users from the group of users. In this paper, we will study the effect of the parameters of the kNN algorithm on the obtained results. For that, we have varied the value of k, then we have measured for each value the prediction accuracy, using the Root Mean Square Error (RMSE), and the Mean Absolute Error (MAE) metrics. The experiments are carried out also to find the value of k which gives good results in both metrics. Then we have calculated also the RMSE and MAE metrics for different similarities in order to find the similarity which gives good results compared to others. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-07969-6_42}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135066110\&doi=10.1007\%2f978-3-031-07969-6_42\&partnerID=40\&md5=c48c9d53d83b702abcc99c35254e6b72}, author = {Najmani, K. and Ajallouda, L. and Benlahmar, E.H. and Sael, N. and Zellou, A.} } @article {Afoudi2022543, title = {Improved Content Based Filtering Using Unsupervised Machine Learning on~Movie Recommendation}, journal = {Lecture Notes in Networks and Systems}, volume = {489 LNNS}, year = {2022}, note = {cited By 0}, pages = {543-555}, abstract = {In our world of massive entertainment options and with thousands of choices on every movie platform, the user found himself in the circle of confusion over which movie to choose. 
Here, the solution is to use recommender systems to predict users{\textquoteright} interests and recommend items most likely to interest them. Recommender systems are utilized in a variety of areas and are most commonly recognized as playlist generators for video and music services, product recommenders for online stores such as AliExpress and Amazon, or content recommenders for social media platforms and the open web. In this paper, we propose a new powerful recommender system that combines Content Based Filtering (CBF) with the popular unsupervised machine learning algorithm K-means clustering. To recommend items to an active user, K-means is then applied to the movie data to assign each movie to a specific cluster; after finding the cluster to which the user belongs, the content-based approach is applied to all movies in the same cluster. Through experimentation on well-known movies, we show that the proposed system satisfies the predictability of the Content-Based algorithm in GroupLens. In addition, our proposed system improves the performance and temporal response speed of the traditional collaborative filtering technique and the content-based technique. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-07969-6_41}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135063306\&doi=10.1007\%2f978-3-031-07969-6_41\&partnerID=40\&md5=56bc73a22c970a0c111f82484c887adb}, author = {Afoudi, Y. and Lazaar, M. and Al Achhab, M. and Omara, H.} } @article {Hakkoum2022, title = {Interpretability in the medical field: A systematic mapping and review study}, journal = {Applied Soft Computing}, volume = {117}, year = {2022}, note = {cited By 4}, abstract = {Context: Recently, the machine learning (ML) field has been rapidly growing, mainly owing to the availability of historical datasets and advanced computational power. This growth is still facing a set of challenges, such as the interpretability of ML models. In particular, in the medical field, interpretability is a real bottleneck to the use of ML by physicians. Therefore, numerous interpretability techniques have been proposed and evaluated to help ML gain the trust of its users. Methods: This review was carried out according to the well-known systematic map and review process to analyze the literature on interpretability techniques when applied in the medical field with regard to different aspects: publication venues and publication year, contribution and empirical types, medical and ML disciplines and objectives, ML black-box techniques interpreted, interpretability techniques investigated, their performance and the best performing techniques, and lastly, the datasets used when evaluating interpretability techniques. Results: A total of 179 articles (1994{\textendash}2020) were selected from six digital libraries: ScienceDirect, IEEE Xplore, ACM Digital Library, SpringerLink, Wiley, and Google Scholar. The results showed that the number of studies dealing with interpretability increased over the years with a dominance of solution proposals and experiment-based empirical type. Diagnosis, oncology, and classification were the most frequent medical task, discipline, and ML objective studied, respectively. Artificial neural networks were the most widely used ML black-box techniques investigated for interpretability. 
Additionally, global interpretability techniques focusing on a specific black-box model, such as rules, were the dominant explanation types, and most of the metrics used to evaluate interpretability were accuracy, fidelity, and number of rules. Moreover, the variety of the techniques used by the selected papers did not allow categorization at the technique level, and the high number of the sum of evaluations (671) of the articles raised a suspicion of subjectivity. Datasets that contained numerical and categorical attributes were the most frequently used in the selected studies. Conclusions: Further effort is needed in disciplines other than diagnosis and classification. Global techniques such as rules are the most used because of their comprehensibility to doctors, but new local techniques should be explored more in the medical field to gain more insights into the model{\textquoteright}s behavior. More experiments and comparisons against existing techniques are encouraged to determine the best performing techniques. Lastly, quantitative evaluation of interpretability and physicians{\textquoteright} implications in interpretability techniques evaluation is highly recommended to evaluate how the techniques will perform in real-world scenarios. It can ensure the soundness of the techniques and help gain trust in black-box models in medical environments. {\textcopyright} 2022 Elsevier B.V.}, keywords = {Black box modelling, Black boxes, Computational power, Computer aided diagnosis, Digital libraries, Explainability, Historical dataset, Interpretability, Machine learning, Medical fields, Neural networks, Systematic mapping, Systematic Review, XAI}, doi = {10.1016/j.asoc.2021.108391}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85122624142\&doi=10.1016\%2fj.asoc.2021.108391\&partnerID=40\&md5=38db4d1f5c417a07d0a3204639e157a2}, author = {Hakkoum, H. and Abnane, I. and Idri, A.} } @article {Ajallouda2022283, title = {KP-USE: An Unsupervised Approach for Key-Phrases Extraction from Documents}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {13}, number = {4}, year = {2022}, note = {cited By 4}, pages = {283-289}, abstract = {Automatic key-phrase extraction (AKE) is one of the most popular research topics in the field of natural language processing (NLP). Several techniques were used to extract the key-phrases: statistical, graph-based, classification algorithms, deep learning, and embedding techniques. AKE approaches that use embedding techniques are based on calculating the semantic similarity between a vector representing the document and the vectors representing the candidate phrases. However, most of these methods only give acceptable results in short texts such as abstracts paper, but on the other hand, their performance remains weak in long documents because it is represented by a single vector. Generally, the key phrases of a document are often expressed in certain parts of the document as, the title, the summary, and to a lesser extent in the introduction and the conclusion, and not of the entire document. For this reason, we propose in this paper KP-USE. A method extracts key-phrases from long documents based on the semantic similarity of candidate phrases to parts of the document containing keyphrases. KP-USE makes use of the Universal Sentence Encoder (USE) as an embedding method for text representation. 
We evaluated the performance of the proposed method on three datasets containing long papers, namely, NUS, Krapivin2009, and SemEval2010, where the results showed its performance outperforms recent AKE methods which are based on embedding techniques. {\textcopyright} 2022. All Rights Reserved.}, keywords = {Deep learning, Embedding technique, Embeddings, Extraction, Graph-based classifications, Graphic methods, Key-phrase, Key-phrases extractions, Natural language processing systems, Performance, Research topics, Semantic similarity, Semantics, Signal encoding, Statistical graphs, Universal sentence encoder, Unsupervised approaches}, doi = {10.14569/IJACSA.2022.0130433}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85129921696\&doi=10.14569\%2fIJACSA.2022.0130433\&partnerID=40\&md5=880d07ce539281dced3a51a82de6a16c}, author = {Ajallouda, L. and Fagroud, F.Z. and Zellou, A. and Lahmar, E.B.} } @article {Yousfi2022205, title = {The Large Annotated Corpus for the Arabic Language (LACAL)}, journal = {Studies in Computational Intelligence}, volume = {1061}, year = {2022}, note = {cited By 0}, pages = {205-219}, abstract = {Annotated corpora has an important role in the NLP field. They are used in almost all NLP applications: automatic dictionary construction, text analysis, information retrieval, machine translation, etc. Annotated corpora are the basis for training operation in NLP systems. Without these corpora, it is difficult to build an efficient system that takes into account all variations and linguistic phenomena. In this paper, we present the annotated corpus we developed. This corpus contains more than 12 million different words labeled by different types of labels: syntactic, morphological, and semantic. This large corpus adds value to the Arabic NLP field, and will certainly improve the quality of the training phase of Arabic NLP systems. Moreover it can be a suitable corpus to test and evaluate the quality of these systems. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-14748-7_12}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85139393388\&doi=10.1007\%2f978-3-031-14748-7_12\&partnerID=40\&md5=5e4ea0e7df29b510ad4dae099d9f1725}, author = {Yousfi, A. and Boumehdi, A. and Laaroussi, S. and Makoudi, R. and Aouragh, S.L. and Gueddah, H. and Habibi, B. and Nejja, M. and Said, I.} } @article {Benaida2022476, title = {Machine and~Deep Learning Predictive Techniques for~Blood Glucose Level}, journal = {Lecture Notes in Networks and Systems}, volume = {468 LNNS}, year = {2022}, note = {cited By 0}, pages = {476-485}, abstract = {Allowing diabetic patients to predict their BGL is an important task for self-management of their metabolic disease. This allows to avoid hypo or hyperglycaemia by taking appropriate actions. Currently, this is possible due to the development of machine and deep learning techniques which are successfully used in many prediction tasks. This paper evaluates and compares the performances of six ML/DL techniques to forecast BGL predictions; four DL techniques: CNN, LSTM, GRU, DBN and two ML/statistic techniques: SVR, and AR. The evaluation of the performance of the six regressors were in term of four criteria: RMSE, MAE, MMRE, and PRED. In addition, the Scott-Knott were used to evaluate the statistical significance test and to rank the regressors. The results show that AR was the best for 5~min ahead forecasting with a mean of RMSE equal to 8.67~mg/dl. 
{\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-04826-5_48}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130238876\&doi=10.1007\%2f978-3-031-04826-5_48\&partnerID=40\&md5=8889c0cf2c91459b8f32389a846059f5}, author = {Benaida, M. and Abnane, I. and Idri, A. and El Idrissi, T.} } @conference {Miloudi2022, title = {Maintenance Effort Estimation for Open Source Software: Current trends}, booktitle = {CEUR Workshop Proceedings}, volume = {3272}, year = {2022}, note = {cited By 0}, abstract = {Software maintenance of Open Source Software (OSS) has gained more attention in recent years, facilitated by the Internet. Since volunteers in OSS do not record the effort of their contribution in maintenance tasks, researchers have to indirectly estimate the maintenance effort of such software. A review of the published OSS-MEE models has been performed using a set of 65 selected studies in a Systematic Mapping Study (SMS). This study analyses and discusses the state of the art of O-MEE and identifies trends through five additional Mapping Questions (MQs). In summary, various maintenance effort estimation (MEE) models were developed for OSS or industrial software. Researchers have mostly expressed the maintenance effort in terms of bug fixing, bug resolution time and severity in conjunction with bug report attributes. Regression Analysis and Bayesian Networks were the most used estimation techniques; Recall, Precision, R2 and F-measure were the most used evaluation criteria, in addition to the k-fold cross-validation method. Most of the models were implemented using WEKA, R software and MATLAB. More than half of the selected studies lacked any validity analysis of their results. Trends are also discussed to identify a set of implications for researchers. {\textcopyright} 2020 Copyright for this paper by its authors.}, keywords = {{\textquoteright}current, Bayesian networks, Bug-fixing, Computer software maintenance, Effort Estimation, Effort estimation model, Industrial software, Maintenance efforts, Maintenance tasks, Mapping, MATLAB, Open source software, Open systems, Open-source softwares, Regression analysis, State of the art, Systematic mapping studies}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85142872497\&partnerID=40\&md5=d5e99ad4babcbb3a4fd5f50001fae51c}, author = {Miloudi, C. and Cheikhi, L. and Abran, A. and Idri, A.} } @conference {Sabiri2022418, title = {Mechanism of Overfitting Avoidance Techniques for Training Deep Neural Networks}, booktitle = {International Conference on Enterprise Information Systems, ICEIS - Proceedings}, volume = {1}, year = {2022}, note = {cited By 0}, pages = {418-427}, abstract = {The objective of a deep learning neural network is to have a final model that performs well both on the data used to train it and the new data on which the model will be used to make predictions. Overfitting refers to the fact that the predictive model produced by the machine learning algorithm adapts well to the training set. In this case, the predictive model will capture both the generalizable correlations and the noise produced by the data and will be able to give very good predictions on the data of the training set, but it will predict badly on data that it has not yet seen during its learning phase. This paper proposes two techniques among many others to reduce or prevent overfitting.
Furthermore, by analyzing the dynamics during training, we propose a consensus classification algorithm that avoids overfitting, and we investigate the performance of these two types of techniques in convolutional neural networks. Early stopping allows the hyper-parameters of a model to be saved at the right time, and dropout, by making the learning of the model harder, allows a gain of more than 50\% by decreasing the loss rate of the model. Copyright {\textcopyright} 2022 by SCITEPRESS - Science and Technology Publications, Lda. All rights reserved.}, keywords = {Computer vision, Convolution, Convolutional neural network, Convolutional neural networks, Data overfitting, Deep learning, Deep neural networks, Dropout, Early stopping, Forecasting, Learning algorithms, Learning systems, Machine-learning, Max-pooling, Overfitting, Predictive models, Training sets}, doi = {10.5220/0011114900003179}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85140895255\&doi=10.5220\%2f0011114900003179\&partnerID=40\&md5=286b687a1791aefc7394085755e790c8}, author = {Sabiri, B. and Asri, B.E. and Rhanoui, M.} } @article {Labghough2022, title = {Mixed RF/FSO SWIPT-based OSLMD Coded AF Cooperative Communication System: Performance Analysis}, journal = {IEEE Transactions on Green Communications and Networking}, year = {2022}, note = {cited By 0}, pages = {1-1}, abstract = {In this paper, the performance analysis of simultaneous wireless information and power transfer (SWIPT) using time switching (TS) protocol-based amplify-and-forward (AF) dual-hop mixed radio-frequency/free-space optic (RF/FSO) coded and uncoded communication systems (CC and UC) is investigated. Specifically, we look at using the One-Step Majority-Logic Decodable (OSMLD) codes at the source and a majority logic decoding algorithm (MLGD) at the destination, jointly with the use of maximal-ratio combining and SWIPT/TS techniques at the relay. Furthermore, the RF link undergoes Nakagami-m fading, while the second hop is subject to M{\'a}laga-M atmospheric turbulence with the effect of pointing errors. Closed-form and asymptotic formulations for the average bit error probability (ABEP) are obtained in terms of the system parameters for both CC and UC scenarios, in addition to the average channel capacity analysis. Computer-based Monte Carlo simulations were used to numerically validate all of the obtained mathematical formulations. Expectedly, it is shown that significant coding gains are achieved using OSMLD codes in comparison with UC. In addition, above a specific threshold of energy efficiency, the system performance gets steady in the high signal-to-noise-ratio regime. Moreover, the decoding method used in this work has been shown to be suitable for the SWIPT/TS-assisted RF/FSO coded AF cooperative communication system where high performance alongside low computational complexity is required.
IEEE}, keywords = {Average bit error probability, Bit error rate, Code, Communications systems, Computer circuits, Cooperative communication, Decodable codes, Decoding, Decoding algorithm, energy efficiency, Energy harvesting, Energy transfer, Errors, Fading channels, Fadings channels, Freespace optics, Intelligent systems, Majority logic decoding, Majority logic decoding algorithm decoding, Malagum-M channel, Maximal-ratio combining, Mixed radio-frequency/free-space optic system, Monte Carlo methods, Nakagami-m fading channels, One-step majority-logic decodable code., Optics systems, Probability, Radiofrequencies, Relay, Signal to noise ratio}, doi = {10.1109/TGCN.2022.3207096}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85139431588\&doi=10.1109\%2fTGCN.2022.3207096\&partnerID=40\&md5=5e23fc48b261f30bd3d69e9c937abedb}, author = {Labghough, S. and Ayoub, F. and Bouanani, F.E. and Belkasmi, M. and Qaraqe, K.A.} } @conference {Clero2022, title = {Monitoring Of Geotechnical Instabilities at Deep Underground Mines, Case of Draa Sfar Mine, Morocco}, booktitle = {World Congress on Civil, Structural, and Environmental Engineering}, year = {2022}, note = {cited By 0}, abstract = {The scope of this paper concerns the identification of underground geotechnical instabilities and the analysis of the rock mass monitoring data at deep underground mines, taking as a study case Draa Sfar mine in Morocco. In fact, geotechnical conditions complexity increases in the mine{\textquoteright}s deeper levels. Therefore, rock mass behaviour analysis, on-site geotechnical instrumentation and continuous monitoring of the medium are strongly required to avoid geotechnical risks. In this context, this paper focuses on presenting, first, a general state of the art about the most frequent rock mass instabilities encountered in the underground media, their classification, and their monitoring methods. In the following section, this paper provides general information about Draa Sfar mine and presents its geotechnical monitoring data analysis. At the end, conclusions about the current monitoring results and perspectives about innovative instrumentation solutions are discussed. {\textcopyright} 2022, Avestia Publishing. All rights reserved.}, doi = {10.11159/icgre22.212}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85139066071\&doi=10.11159\%2ficgre22.212\&partnerID=40\&md5=58c00b8254f4f94cb74bf2d21ae22607}, author = {Clero, K. and Ed-Diny, S. and Soror, T. and Rziki, S. and Achalhi, M. and El Fkihi, S. and Boanarijesy, A.} } @conference {Abbal2022, title = {Multi-level multi-capacitated facility location problem: formulation and resolution.}, booktitle = {2022 IEEE 6th International Conference on Logistics Operations Management, GOL 2022}, year = {2022}, note = {cited By 0}, abstract = {Facility location problem is one of the most problems studied since the 1950 years; many variants have been introduced in order to deal with real-world problems. In 2016, a new variant was developed to generalize the p-median problem, called {\textquoteright}budget constraint multi-capacitated location problem{\textquoteright} (BMCLP). This paper aims to generalize the BMCLP onto the multi-level, a problem that arises in many scopes as telecommunication, freight transportation, etc. 
Therefore, we proposed a mathematical formulation as mixed integer, called {\textquoteright}Multi-level multi-capacitated facility location problem{\textquoteright} (MLMCLP) and generated an artificial set of instances in order to validate the new formulation. Finally, we apply the Branch and Cut solving method using the CPLEX solver. The results obtained from the numerical study show the robustness of the proposed formulation. {\textcopyright} 2022 IEEE.}, keywords = {Branch Cuts, Budget constraint, Budget control, Capacitated facility location problems, Capacitated location, Facilities locations, Freight transportation, Generalisation, Integer programming, Location, Location problems, Multi-capacity, Multilevels, Problem formulation}, doi = {10.1109/GOL53975.2022.9820242}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135206286\&doi=10.1109\%2fGOL53975.2022.9820242\&partnerID=40\&md5=d5a9b0d02e9e1d7bfa52d2971ac50263}, author = {Abbal, K. and El Amrani, M. and Benadada, Y.} } @article {Zerouaoui20221, title = {A new approach for histological classification of breast cancer using deep hybrid heterogenous ensemble}, journal = {Data Technologies and Applications}, year = {2022}, note = {cited By 0}, pages = {1-34}, abstract = {Purpose: Hundreds of thousands of deaths each year in the world are caused by breast cancer (BC). An early-stage diagnosis of this disease can positively reduce the morbidity and mortality rate by helping to select the most appropriate treatment options, especially by using histological BC images for the diagnosis. Design/methodology/approach: The present study proposes and evaluates a novel approach which consists of 24 deep hybrid heterogenous ensembles that combine the strength of seven deep learning techniques (DenseNet 201, Inception V3, VGG16, VGG19, Inception-ResNet-V3, MobileNet V2 and ResNet 50) for feature extraction and four well-known classifiers (multi-layer perceptron, support vector machines, K-nearest neighbors and decision tree) by means of hard and weighted voting combination methods for histological classification of BC medical image. Furthermore, the best deep hybrid heterogenous ensembles were compared to the deep stacked ensembles to determine the best strategy to design the deep ensemble methods. The empirical evaluations used four classification performance criteria (accuracy, sensitivity, precision and F1-score), fivefold cross-validation, Scott{\textendash}Knott (SK) statistical test and Borda count voting method. All empirical evaluations were assessed using four performance measures, including accuracy, precision, recall and F1-score, and were over the histological BreakHis public dataset with four magnification factors (40{\texttimes}, 100{\texttimes}, 200{\texttimes} and 400{\texttimes}). SK statistical test and Borda count were also used to cluster the designed techniques and rank the techniques belonging to the best SK cluster, respectively. Findings: Results showed that the deep hybrid heterogenous ensembles outperformed both their singles and the deep stacked ensembles and reached the accuracy values of 96.3, 95.6, 96.3 and 94 per cent across the four magnification factors 40{\texttimes}, 100{\texttimes}, 200{\texttimes} and 400{\texttimes}, respectively. Originality/value: The proposed deep hybrid heterogenous ensembles can be applied for the BC diagnosis to assist pathologists in reducing the missed diagnoses and proposing adequate treatments for the patients. 
{\textcopyright} 2022, Emerald Publishing Limited.}, keywords = {Borda Count, Breast Cancer, Computer aided diagnosis, Convolutional neural network, Convolutional neural networks, Decision trees, Deep convolutional neural network, Deep neural networks, Digital pathologies, Diseases, Empirical evaluations, Ensemble methods, F1 scores, Histological images, Image classification, Images processing, Medical imaging, Nearest neighbor search, Statistical tests, Support vector machines}, doi = {10.1108/DTA-05-2022-0210}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85140306559\&doi=10.1108\%2fDTA-05-2022-0210\&partnerID=40\&md5=52f113ae4cf2b14566d0b92c7140ffe1}, author = {Zerouaoui, H. and Idri, A. and El Alaoui, O.} } @conference {Najmani2022, title = {Offline and Online Evaluation for Recommender Systems}, booktitle = {2022 International Conference on Intelligent Systems and Computer Vision, ISCV 2022}, year = {2022}, note = {cited By 0}, abstract = {Recommender systems aim to facilitate decision making for users by offering them information according to their preferences; they are now popular in several application domains. The evaluation of recommender systems is very important to have an effective application in practice. In addition, evaluation focuses on finding better algorithms and assessing their performance. However, researchers have not given it much attention in this field. There are various ways to evaluate a recommender system. In this paper, we will discuss the main types of evaluation in this domain, namely offline and online evaluation. We will start with an overview of recommender systems, then present each type of evaluation, and compare the offline and the online evaluation for recommender systems. We will base the comparison on ten factors: reproducibility, the reliability of the results of each type of evaluation, the preparation cost, the evaluation, the stability, extensibility (that is, whether new metrics can be added or not), scalability, the elapsed time, depth of analysis, and the sparsity metric. Finally, we will discuss the factors presented in the comparison. {\textcopyright} 2022 IEEE.}, keywords = {Applications domains, Decision making, Decisions makings, It focus, Offline, Offline evaluation, On-line evaluation, Online systems, Performance, Recommender Systems, Reliability analysis, Reproducibilities}, doi = {10.1109/ISCV54655.2022.9806059}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85134293509\&doi=10.1109\%2fISCV54655.2022.9806059\&partnerID=40\&md5=31b11e740c58602df2b12bf3260a1ec9}, author = {Najmani, K. and Ajallouda, L. and Benlahmar, E.H. and Sael, N. and Zellou, A.} } @article {Toub20221, title = {Operating room scheduling 2019 survey}, journal = {International Journal of Medical Engineering and Informatics}, volume = {14}, number = {1}, year = {2022}, note = {cited By 2}, pages = {1-30}, abstract = {Numerous optimisation problems in healthcare have been approached by researchers over the last three to four decades. Hospital logistics - organised and structured to secure patient satisfaction in terms of quality, quantity, time, security and least cost - forms part of the quest for global performance. We provide herein a review of recent studies and applications of operations research in healthcare. In particular, we survey work on optimisation problems, focusing on the planning and scheduling of operating rooms.
The latter is a highly strategic place within the hospital as it requires key medical competence and according to Macario (2008) surgical sector expenditure represents nearly a third of a hospital{\textquoteright}s budget. We analyse recent research on operating room planning and scheduling from 2008 to 2019; our evaluation is based on patient characteristics, performance measurement, the solution techniques used in the research and the applicability of the research to real life cases. The searches were based on PubMed, Web of Science, Science Direct and Google Scholar databases. Copyright {\textcopyright} 2022 Inderscience Enterprises Ltd.}, keywords = {adult, agricultural worker, Article, budget, clinical evaluation, computer assisted tomography, Computer simulation, cost effectiveness analysis, eutrophication, febrile neutropenia, female, genetic algorithm, health care cost, health care facility, health care system, hip replacement, hospital cost, hospitalization, human, intensive care unit, length of stay, Machine learning, male, mathematical model, operating room personnel, operation duration, Patient satisfaction, population size, stochastic model, system analysis, Time series analysis, total quality management, vaccination, work environment, workload}, doi = {10.1504/IJMEI.2022.119307}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85120773204\&doi=10.1504\%2fIJMEI.2022.119307\&partnerID=40\&md5=a2ac2109f88463231d4e37b0a988843f}, author = {Toub, M. and Souissi, O. and Achchab, S.} } @conference {Toub2022, title = {Operating rooms scheduling using Variable Neighborhood Search meta-heuristic}, booktitle = {2022 2nd International Conference on Innovative Research in Applied Science, Engineering and Technology, IRASET 2022}, year = {2022}, note = {cited By 1}, abstract = {Healthcare systems around the world are faced with rising costs, increasingly complex illnesses and declining reimbursements. In this context, problems related to health optimization are complex, because they concern the fabrication of schedules that absorb the disturbances occurring in the future. The operating room is a complex environment, at risk for the safety of the patient, and which involves many stakeholders. Its organization must be based on a reflection around the management of flows to contribute to the improvement of the patient{\textquoteright}s journey. In this paper, we propose a smart operating rooms scheduling using Variable Neighborhood Search (VNS) meta-heuristic. According to the literature review, VNS meta-heuristic approach still underused to resolve the cited issue. In this work, we have developed two VNS basic schemes of (Variable Neighborhood Descent (VND) and General VNS (GVNS)) in order to show their efficiency to meet operating rooms planning and scheduling problem challenges. {\textcopyright} 2022 IEEE.}, keywords = {Complex environments, Healthcare systems, Heuristic algorithms, Heuristic methods, Literature reviews, Metaheuristic, Operating rooms, Operating rooms scheduling, Operating theatre, Operational research, Optimisations, Optimization, Scheduling, Surgerie, Variable neighborhood search}, doi = {10.1109/IRASET52964.2022.9738371}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85127982211\&doi=10.1109\%2fIRASET52964.2022.9738371\&partnerID=40\&md5=a5119109d2fa9362b7b5f3b9ba37aa99}, author = {Toub, M. and Achchab, S. 
and Souissi, O.} } @article {Ouifak2022456, title = {Performance-Interpretability Tradeoff of Mamdani Neuro-Fuzzy Classifiers for Medical Data}, journal = {Lecture Notes in Networks and Systems}, volume = {468 LNNS}, year = {2022}, note = {cited By 0}, pages = {456-465}, abstract = {Neuro-fuzzy systems are models that incorporate the learning ability and performance of Artificial Neural Networks (ANNs) with the interpretable reasoning of fuzzy inference systems (FISs). An ANN can learn patterns from data and achieve high accuracy, while a FIS uses linguistic and interpretable rules to match inputs and outputs of the data. Two types of FISs are used the most in literature: Takagi-Sugeno-Kang (TSK) and Mamdani. The main focus of this paper is on the Mamdani neuro-fuzzy systems, notably the Hybrid Neuro-Fuzzy Inference System (HyFIS) and the Neuro-Fuzzy Classifier (NEFCLASS). It aims at evaluating and comparing the two classifiers over two medical datasets to study their performance-interpretability tradeoff. Results show that HyFIS is the best in terms of performance, while NEFCLASS is better in terms of interpretability. As for the performance-interpretability tradeoff, NEFCLASS has the best overall results; it achieves a good performance while being less complicated and more interpretable. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-04826-5_46}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130272925\&doi=10.1007\%2f978-3-031-04826-5_46\&partnerID=40\&md5=65dc74043fe03a59608e34846f4fd7db}, author = {Ouifak, H. and Idri, A. and Benbriqa, H. and Abnane, I.} } @article {Ajallouda20221601, title = {Present and absent keyphrases extraction: an approach based on sentence embedding}, journal = {Indonesian Journal of Electrical Engineering and Computer Science}, volume = {28}, number = {3}, year = {2022}, note = {cited By 0}, pages = {1601-1612}, abstract = {The automatic keyphrases extraction (AKE) of a document is any expression by which we can learn its content without having to read it. Keyphrases are exploited in natural language processing (NLP) applications. These phrases are often mentioned in the document but there may be some keyphrases that are not mentioned. In the field of AKE, researchers have exploited many techniques, such as statistical calculation, deep learning algorithms, graph representation, and sentence embedding techniques. Approaches that exploit embedding techniques calculate the similarity between a document and a candidate keyphrase, where similar phrases to the document are considered as keyphrases. Representing the document by a single vector makes its performance poor, especially in long documents. This is in addition to the inability of these methods to generate absent keyphrases. In order to overcome these problems, our paper proposes an unsupervised approach to AKE, based on the universal sentence encoder (USE) to represent candidate keyphrases and parts of the document probably containing keyphrases. Our method also generates keyphrases not mentioned in the text. We compared the performance of the proposed approach with other methods based on embedding techniques, where the results showed the superiority of our approach especially in long documents. {\textcopyright} 2022 Institute of Advanced Engineering and Science. 
All rights reserved.}, doi = {10.11591/ijeecs.v28.i3.pp1601-1612}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85140265191\&doi=10.11591\%2fijeecs.v28.i3.pp1601-1612\&partnerID=40\&md5=b9dbdcf5c9109b2737938f306a2aee4f}, author = {Ajallouda, L. and Zellou, A.} } @article {Naji2022832, title = {Psychological Behavior Prediction through Sentiment Analysis Technics: Transformers and ML Approach}, journal = {Journal of Computer Science}, volume = {18}, number = {9}, year = {2022}, note = {cited By 0}, pages = {832-840}, abstract = {In the era of the COVID-19 epidemic, governments have imposed nationwide lockdowns, which have made a huge change to people{\textquoteright}s daily routines. This, in turn, indirectly impacts people{\textquoteright}s mental well-being. Due to social media, many conversations about these phenomena occur online, especially those related to people{\textquoteright}s emotions, which has brought both challenges and opportunities for sentiment analysis researchers. In this article, we are interested in extracting correlations between this epidemic and its psychological effects by analyzing users{\textquoteright} tweets through common Deep Learning and Machine Learning approaches used for text classification. This goal is a crucial step towards fulfilling the main objective of our research: developing an intelligent system that provides recommendations such as positive support and early alerts to help people with specific needs, particularly challenging mental states. {\textcopyright} 2022 Maryame Naji, Najima Daoudi and Rachida Ajhoun}, doi = {10.3844/jcssp.2022.832.840}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85138207279\&doi=10.3844\%2fjcssp.2022.832.840\&partnerID=40\&md5=4450c45ecbc26b88d284f6d19b57a9bd}, author = {Naji, M. and Daoudi, N. and Ajhoun, R.} } @article {Annoukoubi20221519, title = {Reduction of harmonics emission of a WECS in the electrical grid using multilevel inverters}, journal = {International Journal of Power Electronics and Drive Systems}, volume = {13}, number = {3}, year = {2022}, note = {cited By 0}, pages = {1519-1536}, abstract = {A wind energy conversion system (WECS) is composed of many non-linear power electronic subsystems, which contribute significantly to harmonic emissions that threaten the quality of electrical power. Hence, for a better integration of WECS in the electrical grid and in order to satisfy IEEE 519 standards, a WECS must inject quality power with a rate of total harmonic distortion (THD) of less than 5\%. Multilevel inverters are an emerging solution for obtaining a perfect sinusoidal output voltage with minimum harmonic content and lower switching losses than the two-level inverter, so that only a smaller filter size is required. Thus, in this paper we present significantly improved results for the reduction of the grid-injected current THD using three types of inverters (two-level, three-level NPC, and five-level cascaded H-bridge) for a WECS, and we compare the THD performance of each studied inverter. All THD results are verified by Fast Fourier transform simulation using MATLAB/Simulink. {\textcopyright} 2022, Institute of Advanced Engineering and Science. All rights reserved.}, doi = {10.11591/ijpeds.v13.i3.pp1519-1536}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85134994487\&doi=10.11591\%2fijpeds.v13.i3.pp1519-1536\&partnerID=40\&md5=3b1a02b6e4009b23736b72506b19cdaa}, author = {Annoukoubi, M. and Essadki, A. and Laghridat, H.
and Nasser, T.} } @article {Mezouar2022501, title = {The Retirement Supply Chain Improvement Using L-SCOR}, journal = {Lecture Notes in Networks and Systems}, volume = {489 LNNS}, year = {2022}, note = {cited By 0}, pages = {501-514}, abstract = {In order to design an adaptable business process, the SCOR model was extended to L-SCOR model integrating the concept of adaptability at the two sections {\textquotedblleft}Process{\textquotedblright} and {\textquotedblleft}Metrics{\textquotedblright}. This work represents a case study that gives the chance to apply L-SCOR on the retirement supply chain. This paper started by giving an overall overview about L-SCOR model then detailed the studied supply chain models at two levels of L-SCOR. The second level model expressed the flows between the planning processes and the business process sL3 {\textquotedblleft}Learn Make{\textquotedblright} that describes the management of adaptability for rights liquidation. The fourth level model explains how the recommendation web service will be used for this business process. The paper also detailed the prerequisites for the recommendation web service which implements the Q-Learning algorithm, and discussed the returning Q-table and the optimal strategy for the called activity. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-07969-6_38}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135009603\&doi=10.1007\%2f978-3-031-07969-6_38\&partnerID=40\&md5=1b13ad88086e61020688b4e24661bd94}, author = {Mezouar, H. and El Afia, A.} } @conference {Sardi2022435, title = {A Reusable Catalog of Requirements for Gamified Mobile Health Applications}, booktitle = {International Conference on Evaluation of Novel Approaches to Software Engineering, ENASE - Proceedings}, year = {2022}, note = {cited By 0}, pages = {435-442}, abstract = {The new era of mobile technologies has revolutionized all fundamental human pursuits in that many sectors have been reshaped and tangibly improved, such as education, finance, and healthcare. Referred to as mHealth, the use of mobile technologies to track and improve health outcomes is a rapidly expanding trend. A plethora of mobile health applications (mHealth apps) are being constantly developed thanks to the rapid adoption of mobile devices. From basic exercise trackers to powerful self-management tools, mHealth apps play several roles towards the provision of better healthcare. To induce playful experiences and boost engagement, gamification has proven to be extremely effective with mHealth apps. This paper presents a catalog of reusable requirements for gamified mHealth apps in terms of gamification, functional suitability, usability, and security, Further improvements are intended to be made continuously to the catalog by adding additional requirements scattered from other standards and information sources. Copyright {\textcopyright} 2022 by SCITEPRESS {\textendash} Science and Technology Publications, Lda. All rights reserved.}, keywords = {Biomedical engineering, Catalog, Gamification, Health outcomes, ISO/IEC, ISO/IEC 25010, Management tool, mHealth, Mobile health application, Mobile Technology, Requirement, Self management}, doi = {10.5220/0011071700003176}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85140984297\&doi=10.5220\%2f0011071700003176\&partnerID=40\&md5=dc18bd51fe840506f2a31af133df9fb2}, author = {Sardi, L. and Idri, A. and Redman, L.M. and Alami, H. 
and Fernandez-Aleman, J.L.} } @article {Radouane20223507, title = {A ROBUST WATERMARKING SCHEME BASED ON DCT, IWT and SVD WITH OPTIMAL BLOCK}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {100}, number = {11}, year = {2022}, note = {cited By 0}, pages = {3507-3521}, abstract = {Signal processing{\textquoteright}s impact on the development of digital media technologies has become a hot topic. The expansion of computer networks and the growth of the Internet have facilitated the production and distribution of unauthorized copies of multimedia information (text, image, sound, and video). To ensure multimedia security, researchers are focusing on digital image watermarking. With this new concept, the watermark is not just hidden in an image, but marked indelibly. In this paper, a robust digital image watermarking method based on a combination of DCT, IWT and SVD is proposed. First, visual cryptography is used to encrypt the watermark image. Then DCT is applied to it and to the host image. IWT and SVD are applied to the DCT coefficients of both the watermark and host images. After that, the watermarking process is done by embedding the singular values of the watermark image into the singular values of the host image. Moreover, the obtained watermarked images are subjected to different attacks to improve the robustness of the proposed scheme. Finally, the extraction process is based on the watermarked image and the reverse of the embedding process to reconstruct the original watermark. The performance is evaluated under various attacks, and experimental results show that our algorithm provides a higher level of robustness and imperceptibility than state-of-the-art methods. {\textcopyright} 2022 Little Lion Scientific.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85133519929\&partnerID=40\&md5=7957ed0e26c3399d0ba554b9a2f98aa5}, author = {Radouane, M. and Zouggari, N.I. and Amraoui, A. and Amraoui, M.} } @conference {Illi2022152, title = {On the Secrecy Analysis of a RIS-aided Wireless Communication System Subject to Phase Quantization Errors}, booktitle = {2022 International Balkan Conference on Communications and Networking, BalkanCom 2022}, year = {2022}, note = {cited By 0}, pages = {152-156}, abstract = {In this paper, we analyze the physical layer security of a jamming-aided wireless communication system assisted by a reconfigurable intelligent surface (RIS). Our setup consists of a source node communicating with a destination node with the help of a RIS, in the presence of a malicious passive eavesdropper attempting to overhear the genuine transmission. Furthermore, an external cooperative jammer is incorporated to reinforce the system{\textquoteright}s secrecy by broadcasting an artificial noise to disrupt the eavesdropper; such a noise can be removed at the destination. Finally, we assume that the tunable RIS phases based on the channel estimates are subject to quantization errors. By virtue of the well-adopted Gamma and Exponential distribution approximations, an approximate expression for the system{\textquoteright}s intercept probability (IP) is derived in terms of the main setup parameters. The results show that the greater the number of reflective elements (REs) and the jamming power, the better the secrecy, while the number of quantization bits has less impact when exceeding 3 bits.
In particular, an IP of 10-3 can be reached with 50 REs even when the legitimate link{\textquoteright}s average signal-to-noise ratio (SNR) is 15 dB below the wiretap channel{\textquoteright}s average SNR. Monte Carlo simulations endorse the derived analytical results. {\textcopyright} 2022 IEEE.}, keywords = {Cooperative communication, Cooperative jamming, Destination nodes, Eavesdropping, Errors, Intelligent systems, Intercept probability, Jamming, Monte Carlo methods, Network layers, Phase quantization errors, Physical layer security, Probability distributions, Reconfigurable, Reconfigurable intelligent surface, Signal to noise ratio, Source nodes, Wireless communication system}, doi = {10.1109/BalkanCom55633.2022.9900815}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85136129114\&doi=10.1109\%2fBalkanCom55633.2022.9900815\&partnerID=40\&md5=021859cbb6665cc6388da51264628ce6}, author = {Illi, E. and Qaraqe, M.K. and El Bouanani, F. and Al-Kuwari, S.M.} } @article {ElHaddaoui2022529, title = {On the Sensitivity of LSTMs to Hyperparameters and Word Embeddings in the Context of Sentiment Analysis}, journal = {Lecture Notes in Networks and Systems}, volume = {489 LNNS}, year = {2022}, note = {cited By 0}, pages = {529-542}, abstract = {Recurrent neural networks are still providing excellent results in sentiment analysis tasks, variants such as LSTM and Bidirectional LSTM have become a reference for building fast and accurate predictive models. However, such performance is difficult to obtain due to the complexity of the models and the hyperparameters choice. LSTM based models can easily overfit to the studied domain, and tuning the hyperparameters to get the desired model is the keystone of the training process. In this work, we provide a study on the sensitivity of a selection of LSTM based models to various hyperparameters and we highlight important aspects to consider while using similar models in the context of sentiment analysis. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-07969-6_40}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135093996\&doi=10.1007\%2f978-3-031-07969-6_40\&partnerID=40\&md5=695a2003d02e28d6d72df06cca749381}, author = {El Haddaoui, B. and Chiheb, R. and Faizi, R. and El Afia, A.} } @article {Abouaomar2022, title = {Service Function Chaining in MEC: A Mean-Field Game and Reinforcement Learning Approach}, journal = {IEEE Systems Journal}, year = {2022}, note = {cited By 0}, pages = {1-12}, abstract = {Multiaccess edge computing (MEC) and network virtualization technologies are important enablers for fifth generation networks to deliver diverse services. Services are often provided as fully connected virtual network functions (VNFs), through service function chaining (SFC). However, the problem of allocating SFC resources at the edge still faces many challenges related to the way VNFs are placed, chained, and scheduled. In this article, to solve these problems, we propose a game theory-based approach with the objective to reduce service latency in the context of SFC at the edge. The problem of allocating SFC resources can be divided into two subproblems: 1) the VNF placement and routing subproblem, and 2) the VNF scheduling subproblem. For the former subproblem, we formulate it as a mean-field game in which VNFs are contending over edge resources aiming at reducing the resource consumption of MEC nodes and reducing latency for users. 
We also propose a reinforcement learning-based technique, where the Ishikawa{\textendash}Mann learning algorithm is used. For the later subproblem, we formulate it as a matching game between VFNs and edge resources to find the execution order of the VNFs while reducing the latency. To efficiently solve it, we propose a modified version of the many-to-one deferred acceptance algorithm (DAA), called the enhanced multistep DAA. To illustrate the performance of the proposed approaches, we perform extensive simulations. The results show that the approaches achieve up to 40\% less resource consumption, and up to 38\% less latency than the benchmarked state-of-the-art methods. IEEE}, keywords = {Computation theory, Computer games, Edge computing, Game, Game theory, Learning algorithms, Network function virtualization, Problem solving, Processor scheduling, Reinforcement learning, Resource allocation, Resource allocation/provisioning, Resource Management, Resources allocation, Routings, Scheduling, Service function chaining, Service functions, Transfer functions, Virtual reality, Virtualizations}, doi = {10.1109/JSYST.2022.3171232}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85131721006\&doi=10.1109\%2fJSYST.2022.3171232\&partnerID=40\&md5=e7ecad4d011f9a66bf06f1e9f3f4d176}, author = {Abouaomar, A. and Cherkaoui, S. and Mlika, Z. and Kobbane, A.} } @article {ElJai2022, title = {Socio-economic analysis of short-term trends of COVID-19: modeling and data analytics}, journal = {BMC Public Health}, volume = {22}, number = {1}, year = {2022}, note = {cited By 0}, abstract = {Background: COVID-19 caused a worldwide outbreak leading the majority of human activities to a rough breakdown. Many stakeholders proposed multiple interventions to slow down the disease and number of papers were devoted to the understanding the pandemic, but to a less extend some were oriented socio-economic analysis. In this paper, a socio-economic analysis is proposed to investigate the early-age effect of socio-economic factors on COVID-19 spread. Methods: Fifty-two countries were selected for this study. A cascade algorithm was developed to extract the R0 number and the day J*; these latter should decrease as the pandemic flattens. Subsequently, R0 and J* were modeled according to socio-economic factors using multilinear stepwise-regression. Results: The findings demonstrated that low values of days before lockdown should flatten the pandemic by reducing J*. Hopefully, DBLD is only parameter to be tuned in the short-term; the other socio-economic parameters cannot easily be handled as they are annually updated. Furthermore, it was highlighted that the elderly is also a major influencing factor especially because it is involved in the interactions terms in R0 model. Simulations proved that the health care system could improve the pandemic damping for low elderly. In contrast, above a given elderly, the reproduction number R0 cannot be reduced even for developed countries (showing high HCI values), meaning that the disease{\textquoteright}s severity cannot be smoothed regardless the performance of the corresponding health care system; non-pharmaceutical interventions are then expected to be more efficient than corrective measures. Discussion: The relationship between the socio-economic factors and the pandemic parameters R0 and J* exhibits complex relations compared to the models that are proposed in the literature. 
The quadratic regression model proposed~here has discriminated the most influencing parameters within the following approximated order, DLBL, HCI, Elderly, Tav, CO2, and WC as first order, interaction, and second order terms. Conclusions: This modeling allowed the emergence of interaction terms that don{\textquoteright}t appear in similar studies; this led to emphasize more complex relationship between the infection spread and the socio-economic factors. Future works will focus on enriching the datasets and the optimization of the controlled parameters to short-term slowdown of similar pandemics. {\textcopyright} 2022, The Author(s).}, keywords = {aged, communicable disease control, COVID-19, Data Science, epidemiology, human, Humans, SARS-CoV-2, Socioeconomic Factors, socioeconomics}, doi = {10.1186/s12889-022-13788-4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85136870489\&doi=10.1186\%2fs12889-022-13788-4\&partnerID=40\&md5=319bf3cc0bc571a1c7ddf78cab18333b}, author = {El Jai, M. and Zhar, M. and Ouazar, D. and Akhrif, I. and Saidou, N.} } @article {Aloqaily20221283, title = {Special Issue on Internet of Things: Intelligent Networks, Communication and Mobility (AdHocNets 2020)}, journal = {Mobile Networks and Applications}, volume = {27}, number = {3}, year = {2022}, note = {cited By 0}, pages = {1283-1285}, doi = {10.1007/s11036-022-01951-5}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85125040615\&doi=10.1007\%2fs11036-022-01951-5\&partnerID=40\&md5=54584bce4af47a9f2a8ead852da1944d}, author = {Aloqaily, M. and Abdellatif, K. and Yan, F.} } @article {Ajallouda202231, title = {A Systematic Literature Review of Keyphrases Extraction Approaches}, journal = {International Journal of Interactive Mobile Technologies}, volume = {16}, number = {16}, year = {2022}, note = {cited By 0}, pages = {31-58}, abstract = {The keyphrases of a document are the textual units that characterize its content such as the topics it addresses, its ideas, their field, etc. Thousands of books, articles and web pages are published every day. Manually extracting keyphrases is a tedious task and takes a lot of time. Automatic keyphrases extraction is an area of text mining that aims to identify the most useful and important phrases that give meaning to the content of a document. Keyphrases can be used in many Natural Language Processing (NLP) applications, such as text summarization, text clustering and text classification. This article provides a Systematic Literature Review (SLR) to investigate, analyze, and discuss existing relevant contributions and efforts that use new concepts and tools to improve keyphrase extraction. We have studied the supervised and unsupervised approaches to extracting keyphrases published in the period 2015{\textendash}2022. We have also identified the steps most commonly used by the different approaches. Additionally, we looked at the criteria that should be evaluated to improve the accuracy of keyphrases extraction. Each selected approach was evaluated for its ability to extract keyphrases. 
Our findings highlight the importance of keyphrase extraction, and provide researchers and practitioners with information about proposed solutions and their limitations, which contributes to extracting keyphrases in a powerful, meaningful and effective way. {\textcopyright} 2022, International Journal of Interactive Mobile Technologies. All Rights Reserved.}, doi = {10.3991/ijim.v16i16.33081}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85137632849\&doi=10.3991\%2fijim.v16i16.33081\&partnerID=40\&md5=cf158a54305193c3291a26b2c49d765f}, author = {Ajallouda, L. and Fagroud, F.Z. and Zellou, A. and Benlahmar, E.H.} } @article {Mezouar2022317, title = {A Systematic Literature Review of Machine Learning Applications in Software Engineering}, journal = {Lecture Notes in Networks and Systems}, volume = {489 LNNS}, year = {2022}, note = {cited By 0}, pages = {317-331}, abstract = {Machine Learning (ML) has been a concern in Software Engineering (SE) over the past years. However, how to use ML and what it can offer for SE is still subject to debate among researchers. This paper investigates the application of ML in SE. The goal is to identify the used algorithms, the addressed topics and the main findings. It performs a Systematic Literature Review (SLR) of peer-reviewed studies published between 1995 and 2020. Data extracted from the studies show that ML algorithms are of great practical value in the different activities of the software development process, especially {\textquotedblleft}Software specification{\textquotedblright} and {\textquotedblleft}Software validation{\textquotedblright}, since {\textquotedblleft}Software bug prediction{\textquotedblright} and {\textquotedblleft}Software quality improvement{\textquotedblright} are the most recurring research topics. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-07969-6_24}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135059882\&doi=10.1007\%2f978-3-031-07969-6_24\&partnerID=40\&md5=744dceb379fd3656a37a9965da30e49f}, author = {Mezouar, H. and Afia, A.E.} } @article {Niharmine20224164, title = {Tifinagh handwritten character recognition using optimized convolutional neural network}, journal = {International Journal of Electrical and Computer Engineering}, volume = {12}, number = {4}, year = {2022}, note = {cited By 1}, pages = {4164-4171}, abstract = {Tifinagh handwritten character recognition has been a challenging problem due to the similarity and variability of its alphabets. This paper proposes an optimized convolutional neural network (CNN) architecture for handwritten character recognition. The suggested CNN model has a multi-layer feedforward neural network that gets features and properties directly from the input data images. It is based on the newest deep learning open-source Keras Python library. The novelty of the model is to optimize the optical character recognition (OCR) system in order to obtain the best performance results in terms of accuracy and execution time. The new optical character recognition system is tested on a customized dataset generated from the Amazigh handwritten character database. Experimental results show a good accuracy of the system (99.27\%) with an optimal execution time of the classification compared to previous works. {\textcopyright} 2022 Institute of Advanced Engineering and Science.
All rights reserved.}, doi = {10.11591/ijece.v12i4.pp4164-4171}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85129672744\&doi=10.11591\%2fijece.v12i4.pp4164-4171\&partnerID=40\&md5=052fc4046f2fd3194e9d1315bb43dc05}, author = {Niharmine, L. and Outtaj, B. and Azouaoui, A.} } @article {Khaldi20221377, title = {TimeSpec4LULC: a global multispectral time series database for training LULC mapping models with machine learning}, journal = {Earth System Science Data}, volume = {14}, number = {3}, year = {2022}, note = {cited By 0}, pages = {1377-1411}, abstract = {Land use and land cover (LULC) mapping is of paramount importance to monitor and understand the structure and dynamics of the Earth system. One of the most promising ways to create accurate global LULC maps is by building good quality state-of-the-art machine learning models. Building such models requires large and global datasets of annotated time series of satellite images, which are not available yet. This paper presents TimeSpec4LULC (https://doi.org/10.5281/zenodo.5913554), a smart open-source global dataset of multispectral time series for 29 LULC classes ready to train machine learning models. TimeSpec4LULC was built based on the seven spectral bands of the MODIS sensors at 500 m resolution, from 2000 to 2021, and was annotated using spatial-temporal agreement across the 15 global LULC products available in Google Earth Engine (GEE). The 22-year monthly time series of the seven bands were created globally by (1) applying different spatial-temporal quality assessment filters on MODIS Terra and Aqua satellites; (2) aggregating their original 8 d temporal granularity into monthly composites; (3) merging Terra + Aqua data into a combined time series; and (4) extracting, at the pixel level, 6 076 531 time series of size 262 for the seven bands along with a set of metadata: geographic coordinates, country and departmental divisions, spatial-temporal consistency across LULC products, temporal data availability, and the global human modification index. A balanced subset of the original dataset was also provided by selecting 1000 evenly distributed samples from each class such that they are representative of the entire globe. To assess the annotation quality of the dataset, a sample of pixels, evenly distributed around the world from each LULC class, was selected and validated by experts using very high resolution images from both Google Earth and Bing Maps imagery. This smartly pre-processed and annotated dataset is targeted towards scientific users interested in developing various machine learning models, including deep learning networks, to perform global LULC mapping. {\textcopyright} 2022 Rohaifa Khaldi et al.}, keywords = {Land cover, Land use, Machine learning, MODIS, multispectral image, spatiotemporal analysis, Time series analysis}, doi = {10.5194/essd-14-1377-2022}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85127624906\&doi=10.5194\%2fessd-14-1377-2022\&partnerID=40\&md5=90f9e45c7b39e149ab387a54f4392d6a}, author = {Khaldi, R. and Alcaraz-Segura, D. and Guirado, E. and Benhammou, Y. and El Afia, A. and Herrera, F.
and Tabik, S.} } @article {Ajallouda2022466, title = {Toward a New Process for Candidate Key-Phrases Extraction}, journal = {Lecture Notes in Networks and Systems}, volume = {455 LNNS}, year = {2022}, note = {cited By 3}, pages = {466-474}, abstract = {Key-phrases in a document are the terms that allow us to get an idea of its content without having to read it. They can be used in many Natural Language Processing (NLP) applications, such as text summarization, machine translation, and text classification. These phrases are selected from a set of terms in the document called candidate key-phrases. Thus, any flaws that may arise during the selection of candidate phrases may affect the automatic key-phrase extraction (AKE). Despite the importance of identifying candidate key-phrases in the AKE process, we found a very limited number of researchers interested in identifying their features in the document. In this paper, we will present the features that allow the identification of candidate key-phrases, based on the study and analysis of the features of 60,000 key-phrases manually selected from five different datasets, in order to improve the performance of AKE approaches. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-02447-4_48}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130348254\&doi=10.1007\%2f978-3-031-02447-4_48\&partnerID=40\&md5=7bc1168842009b4c70c8fea6cc905514}, author = {Ajallouda, L. and Hourrane, O. and Zellou, A. and Benlahmar, E.H.} } @article {Mezouar2022492, title = {Toward a Self-adaptive Supply Chains: L-SCOR Implementation Proposal, and Case Studies Methodology Proposal}, journal = {Lecture Notes in Networks and Systems}, volume = {489 LNNS}, year = {2022}, note = {cited By 0}, pages = {492-500}, abstract = {In order to improve business process behavior, guarantee service continuity, and minimize the time wasted by the execution of automatic activities, L-SCOR is proposed to analyze the self-adaptability of a business process and to improve this performance. This paper gives an overview of the L-SCOR reference model, explains a proposal for its implementation based on reinforcement learning, and presents a methodology to conduct case studies based on Six Sigma strategies. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-07969-6_37}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135026919\&doi=10.1007\%2f978-3-031-07969-6_37\&partnerID=40\&md5=436de2817ad5296badb2e8ccf1743814}, author = {Mezouar, H. and El Afia, A.} } @article {Elmidaoui2022202, title = {Towards a Taxonomy of Software Maintainability Predictors: A Detailed View}, journal = {Lecture Notes in Networks and Systems}, volume = {470 LNNS}, year = {2022}, note = {cited By 0}, pages = {202-210}, abstract = {To help practitioners and researchers choose the most suitable predictors when selecting from existing Software Product Maintainability Prediction (SPMP) models or designing new ones, a literature review of empirical studies on SPMP identified a large number of metrics or factors used as predictors of maintainability. However, there is redundancy and ambiguity in both the naming and the meaning of these predictors. To address this terminology issue, a one-level taxonomy of the SPMP predictors identified in the literature review has been proposed.
This paper now proposes a more detailed two-level taxonomy in which the first level refers to four categories, namely software design, software size, quality attributes (or factors), and software process, and the second level refers to sub-categories and predictors inventoried from empirical studies on SPMP. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-04829-6_18}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130308935\&doi=10.1007\%2f978-3-031-04829-6_18\&partnerID=40\&md5=e8e2d1411e399bbdb041c3d9d26ccad6}, author = {Elmidaoui, S. and Cheikhi, L. and Idri, A. and Abran, A.} } @article {Azougaghe202211, title = {Turbo decoding of concatenated codes based on RS codes using adapted scaling factors}, journal = {Infocommunications Journal}, volume = {14}, number = {1}, year = {2022}, note = {cited By 0}, pages = {11-16}, abstract = {Iteratively decoded block turbo codes are product codes that exhibit excellent performance with reasonable complexity. In this paper, a generalization of parallel concatenated block codes (GPCBs) based on RS codes is presented. We propose an efficient decoding algorithm that modifies the Chase-Pyndiah algorithm using a weighting factor α and a reliability factor β. In this work, we studied the effect of diverse parameters, such as the component codes, the interleaver size (number of sub-blocks) and the number of iterations. The simulation results show the relevance of the adapted parameters for decoding generalized parallel concatenated block codes based on RS codes. The proposed algorithm (MCP) using the adapted parameters performs better than the one using empirical parameters (CP). {\textcopyright} 2022 Scientific Association for Infocommunications. All rights reserved.}, keywords = {Block codes, Block Turbo codes, Chase decoding, Concatenated codes, Generalized parallel concatenated code, Iterative decoding, Iterative decodings, Modified Chase-Pyndiah algorithm, Performance, Product code, RS codes, Scaling factors, Turbo codes, Turbo decoding}, doi = {10.36244/ICJ.2022.1.2}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85129965517\&doi=10.36244\%2fICJ.2022.1.2\&partnerID=40\&md5=b8b9da3f379cbf44233c273bb8f4108f}, author = {Azougaghe, E.-S. and Farchane, A. and Safi, S. and Belkasmi, M.} } @article {Bouanane202277, title = {The vehicle routing problem with simultaneous delivery and pickup: A taxonomic survey}, journal = {International Journal of Logistics Systems and Management}, volume = {41}, number = {1-2}, year = {2022}, note = {cited By 0}, pages = {77-119}, abstract = {In recent years, the vehicle routing problem with simultaneous delivery and pickup (VRPSDP) and its variants, which constitute a study field closely related to reverse logistics, are becoming much more prevalent and more frequently studied. However, the problem features and assumptions diversify considerably and no literature review has classified the existing studies. The main contributions of this paper are: 1) to define the VRPSDP{\textquoteright}s field in its entirety; 2) to accomplish an all-encompassing taxonomic classification of the VRPSDP literature published since 1989; 3) to delimit all of VRPSDP{\textquoteright}s facets parsimoniously. Proposing an adapted taxonomy, we classify 191 papers and categorise the trends in the VRPSDP literature.
Copyright {\textcopyright} 2022 Inderscience Enterprises Ltd.}, doi = {10.1504/IJLSM.2022.120982}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85125709767\&doi=10.1504\%2fIJLSM.2022.120982\&partnerID=40\&md5=2c0c42404fd11b4e4c7608472b979cbd}, author = {Bouanane, K. and El Amrani, M. and Benadada, Y.} } @article {Annouch2021607, title = {An Adaptive Genetic Algorithm for a New Variant of the Gas Cylinders Open Split Delivery and Pickup with Two-dimensional Loading Constraints}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {12}, number = {1}, year = {2021}, note = {cited By 1}, pages = {607-619}, abstract = {This paper studies a combination of two well-known problems in distribution logistics, which are the truck loading problem and the vehicle routing problem. In our context, a customer daily demand exceeds the truck capacity. As a result, the demand has to be split into several routes. In addition, it is required to assign customers to depots, which means that each customer is visited just once by any truck in the fleet. Moreover, we take into consideration a customer time windows. The studied problem can be defined as a Multi-depots open split delivery and pickup vehicle routing problem with two-dimensional loading constraints and time windows (2L-MD-OSPDTW). A mathematical formulation of the problem is proposed as a mixed-integer linear programming model. Then, a set of four class instances is used in a way that reflects the real-life case study. Furthermore, a genetic algorithm is proposed to solve a large scale dataset. Finally, preliminary results are reported and show that the MILP performs very well for small test instances while the genetic algorithm can be efficiently used to solve the problem for a widereaching test instances. {\textcopyright} 2021. All rights reserved.}, keywords = {Adaptive genetic algorithms, Constraint programming, Fleet operations, Genetic algorithms, Integer programming, Large dataset, Loading constraints, Multi depots, Pickups, Routing algorithms, Sales, Split delivery, Split pickups, Test instances, Time windows, Trucks, Two-dimensional, Two-dimensional loading, Vehicle routing, Vehicle Routing Problems}, doi = {10.14569/IJACSA.2021.0120170}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85100379461\&doi=10.14569\%2fIJACSA.2021.0120170\&partnerID=40\&md5=7e2a6546b01de8c70b8cca5545b09a4e}, author = {Annouch, A. and Bellabdaoui, A.} } @article {Illi2021261, title = {Analysis of Asymmetric Dual-Hop Energy Harvesting-Based Wireless Communication Systems in Mixed Fading Environments}, journal = {IEEE Transactions on Green Communications and Networking}, volume = {5}, number = {1}, year = {2021}, note = {cited By 3}, pages = {261-277}, abstract = {This work investigates the performance of a dual-hop energy harvesting-based fixed-gain amplify-and-forward relaying communication system, subject to fading impairments. We consider a source node ( {S} ) communicating with a destination node ( {D} ), either directly or through a fixed distant relay ( {R} ), which harvests energy from its received signals and uses it to amplify and forward the received signals to {D}. We also consider maximal-ratio combining at {D} to combine the signals coming from {S} and {R}. Both power-splitting and time-switching energy harvesting protocols are investigated. The {S}\,\,-\,\,{R} link is modeled by Nakagami- {m} fading model, while the {R}\,\,-\,\,{D} and {S}\,\,-\,\,{D} links experience \alpha - μ fading. 
Closed-form expressions for the statistical properties of the total signal-to-noise ratio are derived, based on which novel closed-form expressions are then derived for the average symbol error rate as well as for the average channel capacity, considering four different adaptive transmission policies. The derived expressions are validated through Monte Carlo simulations. {\textcopyright} 2017 IEEE.}, keywords = {Adaptive transmission policy, Amplify-and-forward relaying, Average channel capacities, Average symbol error rate (SER), Channel capacity, Closed-form expression, Energy harvesting, Maximal ratio combining (MRC), Monte Carlo methods, Signal to noise ratio, Statistical properties, Wireless communication system}, doi = {10.1109/TGCN.2020.3035512}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85103177403\&doi=10.1109\%2fTGCN.2020.3035512\&partnerID=40\&md5=8bfc86e88ca31e88a392a44a37280141}, author = {Illi, E. and Bouanani, F.E. and Sofotasios, P.C. and Muhaidat, S. and Costa, D.B.D. and Ayoub, F. and Al-Fuqaha, A.} } @article {Hakkoum2021587, title = {Assessing and Comparing Interpretability Techniques for Artificial Neural Networks Breast Cancer Classification}, journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization}, volume = {9}, number = {6}, year = {2021}, note = {cited By 11}, pages = {587-599}, abstract = {Breast cancer is the most common type of cancer among women. Thankfully, early detection and treatment improvements helped decrease the number of deaths. Data Mining techniques have always assisted BC tasks whether it is screening, diagnosis, prognosis, treatment, monitoring, and/or management. Nowadays, the use of Data Mining is witnessing a new era. In fact, the main objective is no longer to replace humans but to enhance their capabilities, which is why Artificial Intelligence is now referred to as Intelligence Augmentation. In this context, interpretability is used to help domain experts learn new patterns and machine learning experts debug their models. This paper aims to investigate three black-boxes interpretation techniques: Feature Importance, Partial Dependence Plot, and LIME when applied to two types of feed-forward Artificial Neural Networks: Multilayer perceptrons, and Radial Basis Function Network, trained on the Wisconsin Original dataset for breast cancer diagnosis. Results showed that local LIME explanations were instance-level interpretations that came in line with the global interpretations of the other two techniques. Global/local interpretability techniques can thus be combined to define the trustworthiness of a black-box model. {\textcopyright} 2021 Informa UK Limited, trading as Taylor \& Francis Group.}, keywords = {Article, Artificial intelligence, artificial neural network, Breast Cancer, Breast cancer classifications, cancer diagnosis, Computer aided diagnosis, cross validation, Data mining, Data-mining techniques, Diseases, Domain experts, early diagnosis, entropy, Explainability, Feature importance, Interpretability, Learn+, learning, learning algorithm, Lime, Machine learning, Multilayer neural networks, nerve cell, nonhuman, Partial dependence plot, perceptron, prediction, prognosis, Radial basis function networks, Treatment monitoring}, doi = {10.1080/21681163.2021.1901784}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85103249025\&doi=10.1080\%2f21681163.2021.1901784\&partnerID=40\&md5=78e1e57a62692bab2b39984182af7904}, author = {Hakkoum, H. and Idri, A. 
and Abnane, I.} } @article {Fagroud2021335, title = {A Brief Survey on Internet of Things (IoT)}, journal = {Lecture Notes in Networks and Systems}, volume = {211 LNNS}, year = {2021}, note = {cited By 5}, pages = {335-344}, abstract = {Today it has become necessary to move past the view of IT as a cost center and to see it instead as a benefit center that supports the improvement and operation of the organization. The Internet of Things can be defined as an advanced paradigm that turns simple devices into intelligent devices capable of exchanging data over a network without human interaction. It is a technology of interest to both research and industry, and it is transforming the way we think and work. This paper presents an overview of the Internet of Things (IoT), its application areas and its challenges, with the aim of providing a guideline for future researchers. {\textcopyright} 2021, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-030-73882-2_31}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111331935\&doi=10.1007\%2f978-3-030-73882-2_31\&partnerID=40\&md5=5c90aac71bfc8656430ff23c2e19c367}, author = {Fagroud, F.Z. and Ajallouda, L. and Lahmar, E.H.B. and Toumi, H. and Zellou, A. and El Filali, S.} } @conference {Annoukoubi2021, title = {Cascade H-Bridge Multilevel Inverter for a Wind Energy Conversion System Applications}, booktitle = {Proceedings of 2021 9th International Renewable and Sustainable Energy Conference, IRSEC 2021}, year = {2021}, note = {cited By 1}, abstract = {This paper presents a study of the advantages of using a Cascade H-Bridge Multilevel Inverter (MLI) for wind energy conversion system (WECS) applications. A WECS is a renewable system that aims to generate electrical energy from wind. In order to ensure the penetration of the generated energy into the power grid, it must satisfy the IEEE 519 standard, which limits total harmonic distortion (THD) in power systems to 5\%. Multilevel inverters have therefore been suggested in many applications to deal with the THD drawbacks of conventional two-level inverter devices. In this work, we present the study, modulation and simulation in MATLAB/Simulink of a wind energy conversion system based on a Cascade H-Bridge Multilevel inverter. The simulation results confirm the benefit of using this inverter instead of the conventional one. {\textcopyright} 2021 IEEE.}, keywords = {Asynchronous generators, Bridge circuits, Cascade H bridges, Cascade H-bridge inverter, Doubly fed induction generators, Electric fault currents, Electric inverters, Electric power transmission networks, Electrical energy, Energy, Energy conversion, IEEE Standards, MATLAB, Multi Level Inverter (MLI), Multilevels, System applications, Total harmonic distortions, Wind energy conversion system, Wind power}, doi = {10.1109/IRSEC53969.2021.9741171}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85128060785\&doi=10.1109\%2fIRSEC53969.2021.9741171\&partnerID=40\&md5=8f44bdb484c9f449dc46750d25fd7d71}, author = {Annoukoubi, M. and Essadki, A.
and Nasser, T.} } @article {ElOuassif202150, title = {Classification techniques in breast cancer diagnosis: A systematic literature review}, journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization}, volume = {9}, number = {1}, year = {2021}, note = {cited By 12}, pages = {50-77}, abstract = {Data mining (DM) consists in analysing a~set of observations to find unsuspected relationships and then summarising the data in new ways that are both understandable and useful. It has become widely used in various medical fields including breast cancer (BC), which is the most common cancer and the leading cause of death among women~worldwide.~BC diagnosis is a~challenging medical task and many studies have attempted to apply classification techniques to it. The objective of the present study is to identify studies on classification techniques~in~BC diagnosis and to analyse them from~three perspectives: classification techniques used, accuracy of the classifiers, and comparison of performance. We performed a~systematic literature review (SLR) of 176 selected studies published between January~2000 and November~2018. The results show that, of the nine classification techniques investigated, artificial neural networks, support vector machines and decision trees were the most frequently used. Moreover, artificial neural networks, support vector machines and ensemble classifiers performed better than the other techniques, with median accuracy values of 95\%, 95\% and 96\% respectively. Most of the selected studies (57.4\%) used datasets containing different types of images such as mammographic, ultrasound, and microarray images. {\textcopyright} 2021 Informa UK Limited, trading as Taylor \& Francis Group.}, keywords = {Article, Artificial intelligence, artificial neural network, Breast Cancer, Breast cancer diagnosis, cancer diagnosis, cause of death, Causes of death, Classification (of information), Classification technique, Comparison of performance, Computer aided diagnosis, data extraction, Data mining, data synthesis, decision tree, Decision trees, Diseases, human, k nearest neighbor, Machine learning, Medical fields, Medical informatics, Network support, Neural networks, publication, qualitative research, Quality control, support vector machine, Support vector machine classifiers, Support vector machines, Support vectors machine, Systematic literature review, Systematic Review, validity}, doi = {10.1080/21681163.2020.1811159}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85098842973\&doi=10.1080\%2f21681163.2020.1811159\&partnerID=40\&md5=9a48998b1c44d263863efcfb25f9920f}, author = {ElOuassif, B. and Idri, A. and Hosni, M. and Abran, A.} } @article {Azougaghe2021348, title = {Comparative Study on the McEliece Public-Key Cryptosystem Based on Goppa and QC-MDPC Codes}, journal = {Lecture Notes in Business Information Processing}, volume = {416 LNBIP}, year = {2021}, note = {cited By 0}, pages = {348-360}, abstract = {In recent years, much research has been conducted on quantum computers {\textendash} machine that exploit the phenomena of quantum mechanics to solve difficult or insoluble mathematical problems for conventional computers. If large-scale quantum computers are built, they will be able to break many of the public key cryptosystems currently in use. This would seriously compromise the confidentiality and integrity of digital communications on the internet. 
Post-quantum cryptography aims to develop secure cryptographic systems against both conventional as well as quantum computers for interacting with existing protocols and communication networks. In this paper we present a public key cryptosystem of McEliece based on the correcting codes, using two types of correcting codes; QC-MDPC and Goppa correcting codes. This latter seems very interesting considering its two characteristics, namely the power of correction and the efficient decoding algorithm which resistant to quantum attacks due to difficulty of decoding a linear code. On the other hand, QC-MDPC cryptosystem code is rapid and more secure than Goppa cryptosystem. {\textcopyright} 2021, Springer Nature Switzerland AG.}, keywords = {Conventional computers, Cryptographic systems, Decoding, Digital communication systems, Digital communications, Information analysis, Large scale quantum computers, Mathematical problems, McEliece public-key cryptosystem, Network security, Post quantum cryptography, Public key cryptography, Public key cryptosystems, Quantum cryptography, Quantum theory, Qubits}, doi = {10.1007/978-3-030-76508-8_25}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111125773\&doi=10.1007\%2f978-3-030-76508-8_25\&partnerID=40\&md5=e1b3a760dc5546ca8c452d1b973aa5fe}, author = {Azougaghe, E.-S. and Farchane, A. and Tazigh, I. and Azougaghe, A.} } @article {Aoun2021315, title = {A Cooperative Multi-swarm Particle Swarm Optimizer Based Hidden Markov Model}, journal = {Studies in Computational Intelligence}, volume = {906}, year = {2021}, note = {cited By 1}, pages = {315-334}, abstract = {Particle swarm optimization (PSO)~is a population-based stochastic metaheuristic algorithm; it has been successful in dealing with a multitude of optimization problems. Many PSO variants have been created to boost its optimization capabilities, in particular, to cope with more complex problems. In this paper, we provide a new approach of multi-population~particle swarm optimization with a cooperation strategy. The proposed algorithm splits the PSO population into four sub swarms and attributes a main role to each one. A machine learning technique is designed as an individual level to allow each particle to determine its suitable swarm membership at each iteration. In a collective level, cooperative rules are designed between swarms to ensure more diversity and realize the better solution using a Master/Slave cooperation scheme. Several simulations are performed on a set of benchmark functions to examine the performances of this approach compared to a multitude of state of the art of PSO variants. Experiments reveal a good computational efficiency of the presented method with distinguishable performances. {\textcopyright} 2020, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-030-58930-1_21}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097954015\&doi=10.1007\%2f978-3-030-58930-1_21\&partnerID=40\&md5=64af5832e5e4b2f749a5c24339a3911a}, author = {Aoun, O. and El Afia, A. 
and Talbi, E.-G.} } @article {HalhoulMerabet2021, title = {Corrigendum to {\textquotedblleft}Intelligent building control systems for thermal comfort and energy-efficiency: A systematic review of artificial intelligence-assisted techniques{\textquotedblright} (Renewable and Sustainable Energy Reviews (2021) 144, (11096), (S1364032121002616), (10}, journal = {Renewable and Sustainable Energy Reviews}, volume = {145}, year = {2021}, note = {cited By 0}, abstract = {The authors of the paper referred to in the title of this Corrigendum report a typo that slipped in the title of the third column in tables 3, 4, 5, 6, 7, 8, and 9, namely, {\textquotedblleft}Underlying AL/ML Tools{\textquotedblright} which should be corrected to {\textquotedblleft}Underlying AI/ML Tools{\textquotedblright}. We apologize for any confusion or inconvenience this typo may have caused. {\textcopyright} 2021 Elsevier Ltd}, doi = {10.1016/j.rser.2021.111116}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85104661544\&doi=10.1016\%2fj.rser.2021.111116\&partnerID=40\&md5=69c87a9ef7a2e1028872941094f56ec9}, author = {Halhoul Merabet, G. and Essaaidi, M. and Haddou, M.B. and Qolomany, B. and Qadir, J. and Anan, M. and Al-Fuqaha, A. and Abid, M.R. and Benhaddou, D.} } @article {Benbriqa2021588, title = {Deep and Ensemble Learning Based Land Use and Land Cover Classification}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {12951 LNCS}, year = {2021}, note = {cited By 1}, pages = {588-604}, abstract = {Monitoring of Land use and Land cover (LULC) changes is a highly encumbering task for humans. Therefore, machine learning based classification systems can help to deal with this challenge. In this context, this study evaluates and compares the performance of two Single Learning (SL) techniques and one Ensemble Learning (EL) technique. All the empirical evaluations were over the open source LULC dataset proposed by the German Center for Artificial Intelligence (EuroSAT), and used the performance criteria -accuracy, precision, recall, F1 score and change in accuracy for the EL classifiers-. We firstly evaluate the performance of SL techniques: Building and optimizing a Convolutional Neural Network architecture, implementing Transfer learning, and training Machine learning algorithms on visual features extracted by Deep Feature Extractors. Second, we assess EL techniques and compare them with SL classifiers. Finally, we compare the capability of EL and hyperparameter tuning to improve the performance of the Deep Learning models we built. These experiments showed that Transfer learning is the SL technique that achieves the highest accuracy and that EL can indeed outperform the SL classifiers. {\textcopyright} 2021, Springer Nature Switzerland AG.}, keywords = {Classification (of information), Convolutional neural networks, Deep feature extraction, Deep learning, Ensemble learning, Features extraction, Hyper-parameter optimizations, Land cover, Land use, Learning algorithms, Learning classifiers, Learning techniques, Network architecture, Performance, Transfer learning}, doi = {10.1007/978-3-030-86970-0_41}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85115689890\&doi=10.1007\%2f978-3-030-86970-0_41\&partnerID=40\&md5=910871e0f58b4f00460e5e2509929a23}, author = {Benbriqa, H. and Abnane, I. and Idri, A. 
and Tabiti, K.} } @conference {Abouaomar2021273, title = {A deep reinforcement learning approach for service migration in MEC-enabled vehicular networks}, booktitle = {Proceedings - Conference on Local Computer Networks, LCN}, volume = {2021-October}, year = {2021}, note = {cited By 4}, pages = {273-280}, abstract = {Multi-access edge computing (MEC) is a key enabler to reduce the latency of vehicular network. Due to the vehicles mobility, their requested services (e.g., infotainment services) should frequently be migrated across different MEC servers to guarantee their stringent quality of service requirements. In this paper, we study the problem of service migration in a MEC-enabled vehicular network in order to minimize the total service latency and migration cost. This problem is formulated as a nonlinear integer program and is linearized to help obtaining the optimal solution using off-the-shelf solvers. Then, to obtain an efficient solution, it is modeled as a multi-agent Markov decision process and solved by leveraging deep Q learning (DQL) algorithm. The proposed DQL scheme performs a proactive services migration while ensuring their continuity under high mobility constraints. Finally, simulations results show that the proposed DQL scheme achieves close-to-optimal performance. {\textcopyright} 2021 IEEE.}, keywords = {Deep learning, Edge computing, Infotainment, Integer programming, Learning schemes, Markov processes, Multi agent systems, Multi-access edge computing, Multiaccess, Q-learning, Quality of service, Reinforcement learning, Reinforcement learning approach, Service migration, Vehicle mobility, Vehicular networks}, doi = {10.1109/LCN52139.2021.9524882}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85118466383\&doi=10.1109\%2fLCN52139.2021.9524882\&partnerID=40\&md5=8e35dfee04f347da3f757f57f5d575c8}, author = {Abouaomar, A. and Mlika, Z. and Filali, A. and Cherkaoui, S. and Kobbane, A.} } @article {ElAkrouchi2021, title = {End-to-end LDA-based automatic weak signal detection in web news}, journal = {Knowledge-Based Systems}, volume = {212}, year = {2021}, note = {cited By 9}, abstract = {An extremely competitive business environment requires every company to monitor its competitors and anticipate future opportunities and risks, creating a dire need for competitive intelligence. In response to this need, foresight study became a prominent field, especially the concept of weak signal detection. This research area has been widely studied for its utility, but it is limited by the need of human expert judgments on these signals. Moreover, the increase in the volume of information on the Internet through blogs and web news has made the detection process difficult, which has created a need for automation. Recent studies have attempted topic modeling techniques, specifically latent Dirichlet allocation (LDA), for automating the weak signal detection process; however, these approaches do not cover all parts of the process. In this study, we propose a fully automatic LDA-based weak signal detection method, consisting of two filtering functions: the weakness function aimed at filtering topics, which potentially contains weak signals, and the potential warning function, which helps to extract only early warning signs from the previously filtered topics. We took this approach with a famous daily web news dataset, and we could detect the risk of the COVID19 pandemic at an early stage. 
{\textcopyright} 2020 Elsevier B.V.}, keywords = {Competition, Competitive business, Competitive intelligence, Detection process, Early warning signs, Filtering functions, Latent dirichlet allocations, Signal detection, Statistics, Topic Modeling, Weak signal detection, Weak signals}, doi = {10.1016/j.knosys.2020.106650}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097573309\&doi=10.1016\%2fj.knosys.2020.106650\&partnerID=40\&md5=a491fca334a7b047c3a02e16ecb7b3f0}, author = {El Akrouchi, M. and Benbrahim, H. and Kassou, I.} } @article {CHLIOUI20211039, title = {Ensemble case based reasoning imputation in breast cancer classification}, journal = {Journal of Information Science and Engineering}, volume = {37}, number = {5}, year = {2021}, note = {cited By 1}, pages = {1039-1051}, abstract = {Missing Data (MD) is a common drawback that affects breast cancer classification. Thus, handling missing data is primordial before building any breast cancer classifier. This paper presents the impact of using ensemble Case-Based Reasoning (CBR) imputation on breast cancer classification. Thereafter, we evaluated the influence of CBR using parameter tuning and ensemble CBR (E-CBR) with three missingness mechanisms (MCAR: Missing completely at random, MAR: Missing at random and NMAR: not missing at random) and nine percentages (10\% to 90\%) on the accuracy rates of five classifiers: Decision trees, Random forest, K-nearest neighbor, Support vector machine and Multi-layer perceptron over two Wisconsin breast cancer datasets. All experiments were implemented using Weka JAVA API code 3.8; SPSS v20 was used for statistical tests. The findings confirmed that E-CBR yields to better results compared to CBR for the five classifiers. The MD percentage affects negatively the classifier performance: As the MD percentage increases, the accuracy rates of the classifier decrease regardless the MD mechanism and technique. RF with E-CBR outperformed all the other combinations (MD technique, classifier) with 89.72\% for MCAR, 87.08\% for MAR and 86.84\% for NMAR. {\textcopyright} 2021 Institute of Information Science. All rights reserved.}, keywords = {Accuracy rate, Breast Cancer, Breast cancer classifications, Cancer classifier, Case based reasoning, Case-based reasoning imputation, Casebased reasonings (CBR), Classification (of information), Data handling, Decision trees, Diseases, Ensemble, Missing at randoms, Missing data, Nearest neighbor search, Parameters tuning, Support vector machines}, doi = {10.6688/JISE.202109_37(5).0004}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85115966179\&doi=10.6688\%2fJISE.202109_37\%285\%29.0004\&partnerID=40\&md5=97c15046a8900f9df38ec3430801c844}, author = {Chlioui, I. and Idri, A. and Abnane, I. and EZZAT, M.} } @article {Amazal2021, title = {Estimating software development effort using fuzzy clustering-based analogy}, journal = {Journal of Software: Evolution and Process}, volume = {33}, number = {4}, year = {2021}, note = {cited By 3}, abstract = {During the past decades, many studies have been carried out in an attempt to build accurate software development effort estimation techniques. However, none of the techniques proposed has proven to be successful at predicting software effort in all circumstances. Among these techniques, analogy-based estimation has gained significant popularity within software engineering community because of its outstanding performance and ability to mimic the human problem solving approach. 
One of the challenges facing analogy-based effort estimation is how to predict effort when software projects are described by a mixture of continuous and categorical features. To address this issue, the present study proposes an improvement of our former 2FA-kprototypes technique referred to as 2FA-cmeans. 2FA-cmeans uses a clustering technique, called general fuzzy c-means, which is a generalization of the fuzzy c-means clustering technique to cluster objects with mixed attributes. The performance of 2FA-cmeans was evaluated and compared with that of our former 2FA-kprototypes technique as well as classical analogy over six datasets that are quite diverse and have different sizes. Empirical results showed that 2FA-cmeans outperforms the two other analogy techniques using both all-in and jackknife evaluation methods. This was also confirmed by the win{\textendash}tie{\textendash}loss statistics and the Scott{\textendash}Knott test. {\textcopyright} 2020 John Wiley \& Sons, Ltd.}, keywords = {Analogy based estimation, Categorical features, Cluster analysis, Clustering techniques, Engineering community, Estimating software, Fuzzy C means clustering, Fuzzy clustering, Fuzzy systems, Human problem solving, Software design, Software development effort}, doi = {10.1002/smr.2324}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85092446413\&doi=10.1002\%2fsmr.2324\&partnerID=40\&md5=76da5a67bd24c945c3aa985d13dad134}, author = {Amazal, F.A. and Idri, A.} } @article {Alaoui2021, title = {Fall detection of elderly people using the manifold of positive semidefinite matrices}, journal = {Journal of Imaging}, volume = {7}, number = {7}, year = {2021}, note = {cited By 3}, abstract = {Falls are one of the most critical health care risks for elderly people, being, in some adverse circumstances, an indirect cause of death. Furthermore, demographic forecasts for the future show a growing elderly population worldwide. In this context, models for automatic fall detection and prediction are of paramount relevance, especially AI applications that use ambient, sensors or computer vision. In this paper, we present an approach for fall detection using computer vision techniques. Video sequences of a person in a closed environment are used as inputs to our algorithm. In our approach, we first apply the V2V-PoseNet model to detect 2D body skeleton in every frame. Specifically, our approach involves four steps: (1) the body skeleton is detected by V2V-PoseNet in each frame; (2) joints of skeleton are first mapped into the Riemannian manifold of positive semidefinite matrices of fixed-rank 2 to build time-parameterized trajectories; (3) a temporal warping is performed on the trajectories, providing a (dis-)similarity measure between them; (4) finally, a pairwise proximity function SVM is used to classify them into fall or non-fall, incorporating the (dis-)similarity measure into the kernel function. We evaluated our approach on two publicly available datasets URFD and Charfi. The results of the proposed approach are competitive with respect to state-of-the-art methods, while only involving 2D body skeletons. {\textcopyright} 2021 by the authors. 
Licensee MDPI, Basel, Switzerland.}, keywords = {Causes of death, Computer vision, Dynamic time warping, Elderly people, Elderly populations, Fall detection, Gram matrices, Health care, Health risks, Healthcare, Musculoskeletal system, Positive semidefinite matrices, Riemann Manifold, Similarity measure, Support vector machines}, doi = {10.3390/jimaging7070109}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111011519\&doi=10.3390\%2fjimaging7070109\&partnerID=40\&md5=fe18155309e5ff7e5a54dff191d8c05e}, author = {Alaoui, A.Y. and Tabii, Y. and Thami, R.O.H. and Daoudi, M. and Berretti, S. and Pala, P.} } @article {Bellahcen2021835, title = {Hand Pose Estimation Based on Deep Learning}, journal = {Lecture Notes in Networks and Systems}, volume = {183}, year = {2021}, note = {cited By 0}, pages = {835-843}, abstract = {The problem of 3D hand pose estimation has aroused a lot of attention in computer vision community for long time. It has been studied in computer vision for decades, as it plays a significant role in human-computer interaction such as virtual/augmented reality applications, computer graphics and robotics. Because of the practical value associated with this topic, it regained huge research interests recently due to the emergence of commodity depth cameras. But despite the recent progress in this field, robust and accurate hand pose estimation remains a challenging task due to the large pose variations, the high dimension of hand motion, the highly articulated structure, significant self-occlusion, viewpoint changes and data noises. Besides, real time performance is often desired in many applications. In this work we have tried to make a comparative study of different methods of hand pose estimation introduced recently, we worked on the implementation of our method based on Deep Learning to solve this problem. {\textcopyright} 2021, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-030-66840-2_63}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85102614632\&doi=10.1007\%2f978-3-030-66840-2_63\&partnerID=40\&md5=815c9baf2e19d68630b5f8ef594118d3}, author = {Bellahcen, M. and Abdellaoui Alaoui, E.A. and Koum{\'e}tio T{\'e}kouabou, S.C.} } @conference {Abnane20211, title = {Heterogeneous ensemble imputation for software development effort estimation}, booktitle = {PROMISE 2021 - Proceedings of the 17th International Conference on Predictive Models and Data Analytics in Software Engineering, co-located with ESEC/FSE 2021}, year = {2021}, note = {cited By 1}, pages = {1-10}, abstract = {Choosing the appropriate Missing Data (MD) imputation technique for a given Software development effort estimation (SDEE) technique is not a trivial task. In fact, the impact of the MD imputation on the estimation output depends on the dataset and the SDEE technique used and there is no best imputation technique in all contexts. Thus, an attractive solution is to use more than one single imputation technique and combine their results for a final imputation outcome. This concept is called ensemble imputation and can help to significantly improve the estimation accuracy. This paper develops and evaluates a heterogeneous ensemble imputation whose members were the four single imputation techniques: K-Nearest Neighbors (KNN), Expectation Maximization (EM), Support Vector Regression (SVR), and Decision Trees (DT). 
The impact of the ensemble imputation was evaluated and compared with those of the four single imputation techniques on the accuracy, measured in terms of the standardized accuracy criterion, of four SDEE techniques: Case Based Reasoning (CBR), Multi-Layer Perceptron (MLP), Support Vector Regression (SVR) and Reduced Error Pruning Tree (REPTree). The Wilcoxon statistical test was also performed in order to assess whether the results are significant. All the empirical evaluations were carried out over six datasets, namely, ISBSG, China, COCOMO81, Desharnais, Kemerer, and Miyazaki. Results show that the use of heterogeneous ensemble-based imputation instead of single imputation significantly improved the accuracy of the four SDEE techniques. Indeed, the ensemble imputation technique was ranked either first or second in all contexts. {\textcopyright} 2021 ACM.}, keywords = {Case based reasoning, Casebased reasonings (CBR), Decision trees, Empirical evaluations, Expectation Maximization, Forestry, Heterogeneous ensembles, Imputation techniques, K nearest neighbor (KNN), Maximum principle, Multilayer neural networks, Nearest neighbor search, Predictive analytics, Software design, Software development effort, Support vector regression, Support vector regression (SVR)}, doi = {10.1145/3475960.3475984}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85113586361\&doi=10.1145\%2f3475960.3475984\&partnerID=40\&md5=7bd6bbfb801a84cd7694e3713d2d0081}, author = {Abnane, I. and Idri, A. and Hosni, M. and Abran, A.} } @conference {Ouardi2021790, title = {Hybrid Prototypical Networks Augmented by a Non-linear Classifier}, booktitle = {International Conference on Enterprise Information Systems, ICEIS - Proceedings}, volume = {1}, year = {2021}, note = {cited By 0}, pages = {790-794}, abstract = {Text classification is one of the most prolific domains in machine learning. Present in raw format all around us in our daily life, from human-to-human communication, mainly through social network apps, to human-machine interaction, especially with chatbots, text is a rich source of information. However, despite the remarkable performance that deep learning achieves in this field, the cost in terms of the amount of data needed to train these models is still considerably high, in addition to the need to retrain them to learn every new task. Nevertheless, a new sub-field of machine learning, named meta-learning, has emerged to overcome those limitations; widely used for image-related tasks, it can also bring solutions to tasks associated with text. Starting from this perspective, we propose a hybrid architecture based on the well-known prototypical networks, adapting this model to text classification and augmenting it with a non-linear classifier. Copyright {\textcopyright} 2021 by SCITEPRESS {\textendash} Science and Technology Publications, Lda. All rights reserved.}, keywords = {Chatbots, Classification (of information), Daily lives, Deep learning, Few-shot learning, Human communications, Human machine interaction, Learning systems, Machine-learning, Metalearning, Nonlinear classifiers, Sources of informations, Text classification, Text processing}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85137962135\&partnerID=40\&md5=8d0e3e181cd871cde23a9553ea783fb2}, author = {Ouardi, A.E. and Rhanoui, M. and Benlarabi, A.
and Asri, B.E.} } @article {Afoudi2021, title = {Hybrid recommendation system combined content-based filtering and collaborative prediction using artificial neural network}, journal = {Simulation Modelling Practice and Theory}, volume = {113}, year = {2021}, note = {cited By 14}, abstract = {Recommendation systems are information filtering tools that present items to users based on their preferences and behavior, for example, suggestions about scientific papers or music a user might like. Based on what we said and with the development of computer science that has started to take an interest in big data and how it is used to discover user interest, we have found a lot of research going on in the area of recommendation and there are powerful systems available. In the unsupervised learning domain, this paper introduces a novel method for creating a hybrid recommender framework that combines Collaborative Filtering with Content Based Approach and Self-Organizing Map neural network technique. By testing our system on a subset of the Movies Database, we demonstrate that our method outperforms state-of-the-art methods in terms of accuracy and precision, as well as improving the efficiency of the traditional Collaborative Filtering methodology. {\textcopyright} 2021 Elsevier B.V.}, keywords = {Accuracy and precision, Collaborative filtering, Collaborative predictions, Conformal mapping, Content based filtering, Content-based approach, Hybrid recommendation, Recommender Systems, Scientific papers, Self organizing maps, Self-organizing map neural network, State-of-the-art methods, Well testing}, doi = {10.1016/j.simpat.2021.102375}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85112615681\&doi=10.1016\%2fj.simpat.2021.102375\&partnerID=40\&md5=6a08638f8df46e823320bf446b6eb978}, author = {Afoudi, Y. and Lazaar, M. and Al Achhab, M.} } @conference {Trabelsi2021281, title = {Hybrid Recommendation Systems: A State of Art}, booktitle = {International Conference on Evaluation of Novel Approaches to Software Engineering, ENASE - Proceedings}, volume = {2021-April}, year = {2021}, note = {cited By 0}, pages = {281-288}, abstract = {Recommendation systems have become more important and popular in many application areas such as music, movies, e-commerce, advertisement and social networks. Recommendation systems use either collaborative filtering, content-based filtering or hybrid filtering in order to propose items to users, and each type has its weaknesses and strengths. In this paper, we present the results of a literature review that focuses specifically on hybrid recommendation systems. The objective of this review is to identify the problems that hybrid filtering tends to solve and the different techniques used to this end. Copyright {\textcopyright} 2021 by SCITEPRESS - Science and Technology Publications, Lda. All rights reserved}, keywords = {Application area, Collaborative filtering, Content based filtering, E- commerces, Hybrid filtering, Hybrid recommendation, Literature reviews, Recommendation problem, Recommender Systems, State of art, System use}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85137960955\&partnerID=40\&md5=eae4312d0264db7b508c80e8414023e3}, author = {Trabelsi, F.Z. and Khtira, A. 
and El Asri, B.} } @article {Meshram2021131336, title = {IBOOST: A Lightweight Provably Secure Identity-Based Online/Offline Signature Technique Based on FCM for Massive Devices in 5G Wireless Sensor Networks}, journal = {IEEE Access}, volume = {9}, year = {2021}, note = {cited By 7}, pages = {131336-131347}, abstract = {The fifth-generation (5G) wireless network is commercialized. The need to integrate massive devices in 5G and wireless sensor networks (WSN) to provide several convenient services for network users becomes imperative. However, there are growing concerns that 5G-WSNs pose new security threats to sensitive user information. User authentication and key agreement have been provided for secure end-to-end communication. However, stricter security techniques are required as billions of massive devices are being networked to collect and process complex user data in real-time. Therefore, anonymous authentication and authorization are highly coveted for privacy preservation and prevention of unlawful exploitation of user data. However, guaranteeing data integrity, authentication, and non-repudiation require special-purpose identity-based signature techniques that are quite difficult to design in practice. In order to address this problem, this paper proposes a lightweight, provably secure identity-based online/offline signature technique (IBOOST) and its extension for massive devices in 5G-WSNs using fractional chaotic maps. The IBOOST scheme achieves multi-time use of offline storage at a lower processing time. Therefore, the signer can reuse the offline pre-stored information in a polynomial time. This makes our IBOOST superior to the existing online/offline signature techniques that allow only a single signature. Additionally, the new technique enables the pre-registration process with a secret key, and no secret key is required in the offline stage. Also, the proposed IBOOST proves to be secure in the random oracle unforgeability under the chosen message attack (UF-IBS-CMA). Finally, the IBOOST and its enhanced version (A-IBOOST) give the lowest computational costs compared to several contending techniques. Therefore, the proposed IBOOST shows superior security and performance with better computational overhead than the preliminary techniques. {\textcopyright} 2013 IEEE.}, keywords = {5G mobile communication systems, 5g wireless sensor network system, Authentication, Chaotic communications, Chaotic map, Chaotic systems, Communication system security, Cryptography, Digital storage, Fractional chaotic map, Identity-based signature scheme, Lyapunov methods, Network security, Offline signatures, Polynomial approximation, Provably secure, Secure communication, Security, Sensor network systems, Wireless sensor networks}, doi = {10.1109/ACCESS.2021.3114287}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85115720532\&doi=10.1109\%2fACCESS.2021.3114287\&partnerID=40\&md5=7998ce2d4092c9f49d84029f3c9d35cd}, author = {Meshram, C. and Imoize, A.L. and Elhassouny, A. and Aljaedi, A. and Alharbi, A.R. 
and Jamal, S.S.} } @article {HalhoulMerabet2021, title = {Intelligent building control systems for thermal comfort and energy-efficiency: A systematic review of artificial intelligence-assisted techniques}, journal = {Renewable and Sustainable Energy Reviews}, volume = {144}, year = {2021}, note = {cited By 42}, abstract = {Building operations represent a significant percentage of the total primary energy consumed in most countries due to the proliferation of Heating, Ventilation and Air-Conditioning (HVAC) installations in response to the growing demand for improved thermal comfort. Reducing the associated energy consumption while maintaining comfortable conditions in buildings are conflicting objectives and represent a typical optimization problem that requires intelligent system design. Over the last decade, different methodologies based on the Artificial Intelligence (AI) techniques have been deployed to find the sweet spot between energy use in HVAC systems and suitable indoor comfort levels to the occupants. This paper performs a comprehensive and an in-depth systematic review of AI-based techniques used for building control systems by assessing the outputs of these techniques, and their implementations in the reviewed works, as well as investigating their abilities to improve the energy-efficiency, while maintaining thermal comfort conditions. This enables a holistic view of (1) the complexities of delivering thermal comfort to users inside buildings in an energy-efficient way, and (2) the associated bibliographic material to assist researchers and experts in the field in tackling such a challenge. Among the 20 AI tools developed for both energy consumption and comfort control, functions such as identification and recognition patterns, optimization, predictive control. Based on the findings of this work, the application of AI technology in building control is a promising area of research and still an ongoing, i.e., the performance of AI-based control is not yet completely satisfactory. This is mainly due in part to the fact that these algorithms usually need a large amount of high-quality real-world data, which is lacking in the building or, more precisely, the energy sector. Based on the current study, from 1993 to 2020, the application of AI techniques and personalized comfort models has enabled energy savings on average between 21.81 and 44.36\%, and comfort improvement on average between 21.67 and 85.77\%. Finally, this paper discusses the challenges faced in the use of AI for energy productivity and comfort improvement, and opens main future directions in relation with AI-based building control systems for human comfort and energy-efficiency management. {\textcopyright} 2021 Elsevier Ltd}, keywords = {Building-control system, Conditioning systems, Control systems, energy efficiency, Energy savings, Energy utilization, Energy-savings, Heating ventilation and air conditioning, Heating ventilation and air-conditioning system, HVAC, Information management, Intelligent buildings, Intelligent systems, Machine learning, Machine-learning, Occupant, Pattern recognition, Quality control, Systematic literature review, Systematic Review, Thermal comfort}, doi = {10.1016/j.rser.2021.110969}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85103719088\&doi=10.1016\%2fj.rser.2021.110969\&partnerID=40\&md5=252263802ee70f683e9929b1ff76d93a}, author = {Halhoul Merabet, G. and Essaaidi, M. and Ben Haddou, M. and Qolomany, B. and Qadir, J. and Anan, M. and Al-Fuqaha, A. and Abid, M.R. 
and Benhaddou, D.} } @article {Yassine2021, title = {Intelligent recommender system based on unsupervised machine learning and demographic attributes}, journal = {Simulation Modelling Practice and Theory}, volume = {107}, year = {2021}, note = {cited By 22}, abstract = {Recommendation systems aim to predict users interests and recommend items most likely to interest them. In this paper, we propose a new intelligent recommender system that combines collaborative filtering (CF) with the popular unsupervised machine learning algorithm K-means clustering. Also, we use certain user demographic attributes such as the gender and age to create segmented user profiles, when items (movies) are clustered by genre attributes using K-means and users are classified based on the preference of items and the genres they prefer to watch. To recommend items to an active user, Collaborative Filtering approach then is applied to the cluster where the user belongs. Following the experimentation for well known movies, we show that the proposed system satisfies the predictability of the CF algorithm in GroupLens. In addition, our proposed system improves the performance and time response speed of the traditional collaborative Filtering technique and the Content-Based technique too. {\textcopyright} 2020 Elsevier B.V.}, keywords = {Collaborative filtering, Collaborative filtering techniques, Content-based techniques, Intelligent recommender system, K-means, K-means clustering, Learning algorithms, Machine learning, Most likely, Population statistics, Recommender Systems, Response speed, Unsupervised machine learning, User profile}, doi = {10.1016/j.simpat.2020.102198}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85096665603\&doi=10.1016\%2fj.simpat.2020.102198\&partnerID=40\&md5=37861e6691cce74784d0555379aae57f}, author = {Yassine, A. and Mohamed, L. and Al Achhab, M.} } @article {L{\textquoteright}Amrani2021521, title = {Intermediary Technical Interoperability Component TIC Connecting Heterogeneous Federation Systems}, journal = {Studies in Computational Intelligence}, volume = {919}, year = {2021}, note = {cited By 0}, pages = {521-539}, abstract = {The spread of digital identity raises many new opportunities and challenges concerning identity. A set of identity management systems has been developed to handle such identities. The aim is both to enhance the end-user experience and to provide secure access for users. Nowadays, we have a large number of heterogeneous identity management initiatives. Proof of its eligibility for identity management is provided under the federation system. The strength of security domains within federated systems is a trusted agreement between communicating entities. However, Federated systems are challenged by the interoperability issue across those federated heterogeneous systems. This work aims to provide a technical interoperability approach for the different federations. The researchers are offering a technical interoperability component TIC, as a midway tool that will enable identity data to be interchanged between heterogeneous federations in total transparency. {\textcopyright} 2021, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-030-57024-8_24}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097874667\&doi=10.1007\%2f978-3-030-57024-8_24\&partnerID=40\&md5=c8c0b2dbd8eb3753758d4a6404b4fdf1}, author = {L{\textquoteright}Amrani, H. and El Bouzekri El Idrissi, Y. 
and Ajhoun, R.} } @article {ElAfia20211, title = {Introduction}, journal = {International Journal of Data Analysis Techniques and Strategies}, volume = {13}, number = {1-2}, year = {2021}, note = {cited By 0}, pages = {1-2}, doi = {10.1504/ijdats.2021.13.issue-1-2}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85105420916\&doi=10.1504\%2fijdats.2021.13.issue-1-2\&partnerID=40\&md5=f62611b1b012adb722cd62fb8366b3a7}, author = {El Afia, A. and Faizi, R. and Garc{\'\i}a, S.} } @conference {Abouaomar2021, title = {Mean-Field Game and Reinforcement Learning MEC Resource Provisioning for SFC}, booktitle = {2021 IEEE Global Communications Conference, GLOBECOM 2021 - Proceedings}, year = {2021}, note = {cited By 0}, abstract = {In this paper, we address the resource provisioning problem for service function chaining (SFC) in terms of the placement and chaining of virtual network functions (VNFs) within a multi-access edge computing (MEC) infrastructure to reduce service delay. We consider the VNFs as the main entities of the system and propose a mean-field game (MFG) framework to model their behavior for their placement and chaining. Then, to achieve the optimal resource provisioning policy without considering the system control parameters, we reduce the proposed MFG to a Markov decision process (MDP). In this way, we leverage reinforcement learning with an actor-critic approach for MEC nodes to learn complex placement and chaining policies. Simulation results show that our proposed approach outperforms benchmark state-of-the-art approaches. {\textcopyright} 2021 IEEE.}, keywords = {Computer games, Computing infrastructures, Computing resource, e-learning, Edge computing, Markov processes, Mean-field games, Multi-access edge computing, Multiaccess, Network function virtualization, Reinforcement learning, Reinforcement learnings, Resource provisioning, Service function chaining, Service functions, Transfer functions, Virtual addresses, Virtual reality}, doi = {10.1109/GLOBECOM46510.2021.9685236}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85124062107\&doi=10.1109\%2fGLOBECOM46510.2021.9685236\&partnerID=40\&md5=8319d7756843c2ccadb8f9de4bec7eff}, author = {Abouaomar, A. and Cherkaoui, S. and Mlika, Z. and Kobbane, A.} } @article {Kharbouch2021, title = {Mobile Technology for Improved Contraceptive Care in Morocco}, journal = {Journal of Medical Systems}, volume = {45}, number = {2}, year = {2021}, note = {cited By 0}, abstract = {The fulfillment of unmet needs for contraception can help women reach their reproductive goals. Therefore, there is a growing concern worldwide about contraception and women{\textquoteright}s knowledge of making an advised choice about it. In this aspect, an outgrown number of apps are now available providing information concerning contraception whether it concerns natural contraception or modern contraception. However, vast amounts of these apps contain inaccurate sexual health facts and non-evidence-based information concerning contraception. On these bases, and in respect to: (1) the needs of women to effectively prevent unintended pregnancies while conducting a stress-free healthy lifestyle. 
(2) the World Health Organization (WHO) Medical Eligibility Criteria (MEC) for contraception{\textquoteright}s recommendations, and (3) the results/recommendations of a field study conducted in the reproductive health center {\textquoteleft}Les Orangers{\textquoteright} in Rabat to collect the app{\textquoteright}s requirements, we developed an evidence-based patient-centered contraceptive app referred to as {\textquoteleft}MyContraception{\textquoteright}. Thereafter, we conducted a set of functional tests to ensure that the MyContraception solution is performing as expected and is conform to the software functional requirements previously set before moving to non-functional requirements evaluation. Since customer{\textquoteright}s feedback is valuable to non-functional testing, we choose to evaluate potential users{\textquoteright} feedback. Moreover, giving that mobile app testing is a complex process involving different skill sets, we elaborated a rigorous experimental design to conduct an empirical evaluation of the MyContraception solution, which will exhaustively assess the overall quality of this solution and examine its effects on improving the quality of patient-centered contraception care. {\textcopyright} 2021, Springer Science+Business Media, LLC, part of Springer Nature.}, keywords = {adult, Article, awareness, contraception, contraceptive agent, Contraceptive Agents, contraceptive behavior, eligibility criteria, evidence based practice center, experimental design, female, female fertility, field study, health care quality, healthy lifestyle, human, Humans, injectable contraceptive agent, long-acting reversible contraception, luteinizing hormone, menstrual cycle, Methodology, mobile application, Mobile applications, Morocco, oral contraceptive agent, patient care, pill, pregnancy, prescription, Privacy, questionnaire, reliability, reproductive health, risk factor, sexual health, technology, unplanned pregnancy, women{\textquoteright}s health, World Health Organization}, doi = {10.1007/s10916-020-01684-6}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85099086862\&doi=10.1007\%2fs10916-020-01684-6\&partnerID=40\&md5=91a540c873cdd78725ec3f6987f5b90f}, author = {Kharbouch, M. and Idri, A. and Rachad, T. and Alami, H. and Redman, L. and Stelate, Y.} } @article {Hamlaoui20211, title = {Model-driven Approach To Align Heterogeneous Models Of A Complex System}, journal = {Journal of Object Technology}, volume = {20}, number = {2}, year = {2021}, note = {cited By 0}, pages = {1-24}, abstract = {To understand and manipulate a complex system, it is necessary to apply the separation of concerns and produce separate models, called viewpoints models. These models represent views on the system that correspond to distinct business domains. They are generally heterogeneous, i.e. conform to different meta-models. The management of the system{\textquoteright}s global model (a complete view of the system) requires the identification of the existing correspondences among the viewpoints models. However, in practice these correspondences are either incompletely identified or not sufficiently formalized to be maintained when models evolve. This restricts their use and does not allow their full exploitation for managing the global model. 
To fix this issue, we propose AHM (Alignment of Heterogeneous Models), an approach to organize the source models as a network of models through a virtual global model called M1C (Model of correspondences between models) that conforms to a Meta-Model of Correspondences (MMC). AHM also proposes a process allowing for both the creation of the global model and its consistency control. Partial automation of this process is done through a refining mechanism supported by a semantics expression described in a Domain Specific Language (DSL). The application of AHM is illustrated by the example of a conference management system. A prototype of a tool called Heterogeneous Matching and Consistency management Suite (HMCS) has been developed to support this approach. {\textcopyright} 2021. All Rights Reserved.}, doi = {10.5381/JOT.2021.20.2.A2}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111507313\&doi=10.5381\%2fJOT.2021.20.2.A2\&partnerID=40\&md5=606f7cf84d332b83818bec57d20a3d0b}, author = {Hamlaoui, M.E. and Ebersold, S. and Bennani, S. and Anwar, A. and Dkaki, T. and Nassar, M. and Coulette, B.} } @article {Baddi2021373, title = {MSDN-GKM: Software Defined Networks Based Solution for Multicast Transmission with Group Key Management}, journal = {Studies in Computational Intelligence}, volume = {919}, year = {2021}, note = {cited By 0}, pages = {373-396}, abstract = {Multicast communication is an important requirement to support many types of applications, such as IPTV, videoconferencing, and group games. Recently, this type of multicast application has grown rapidly: on one side, application providers have proposed many new applications; on the other side, the Internet research community has proposed many different multicast routing protocols to support them efficiently. Therefore, the need for secure mechanisms providing the confidentiality and privacy of communications is increasingly pressing. In the current standardized IP multicast architecture, any host can join a multicast group, as source or receiver, without authentication, because no host identification information is maintained by routers; this situation clearly leads to many security risks. To enhance security in multicast communication, this paper introduces an SDN-based multicast solution with a Group Key Management (GKM) approach. Our proposed solution, MSDN-GKM, includes several SDN modules to support multicast functions, group key generation, group key exchange, storage, use, and key replacement whenever multicast group membership changes. To prove the efficiency of our proposed solution, a prototype is implemented on our SDN platform. The test-bed results show that our solution outperforms the traditional IP multicast approaches proposed in the literature in two respects: firstly, multicast performance metrics in terms of end-to-end delay, tree construction delay and delay variation; secondly, multicast group key management performance in terms of storage overhead and processing time. {\textcopyright} 2021, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-030-57024-8_17}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097877454\&doi=10.1007\%2f978-3-030-57024-8_17\&partnerID=40\&md5=90fd04903bd123b46bd5571b000609a6}, author = {Baddi, Y. and Anass, S. and Zkik, K. and Maleh, Y. and Mohammed, B. 
and El Kettani Mohamed Dafir, E.-C.} } @conference {Ahajjam2021, title = {On Multi-Label Classification for Non-Intrusive Load Identification using Low Sampling Frequency Datasets}, booktitle = {Conference Record - IEEE Instrumentation and Measurement Technology Conference}, volume = {2021-May}, year = {2021}, note = {cited By 0}, abstract = {Non-intrusive load monitoring (NILM) aims to infer information about the electric consumption of individual loads using the premises{\textquoteright} aggregate consumption. In this work, we target supervised multi-label classification for non-intrusive load identification. We describe how we have created a new dataset from Moroccan households using a low sampling frequency. Then, we analyze the performance of three machine learning models for NILM, and investigate the impact of signal input length on performance. {\textcopyright} 2021 IEEE.}, keywords = {Aggregate consumption, Classification (of information), Electric consumption, Load identification, Machine learning models, Measurement, Multi label classification, Non-intrusive, Nonintrusive load monitoring, Sampling frequencies}, doi = {10.1109/I2MTC50364.2021.9460059}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85113709883\&doi=10.1109\%2fI2MTC50364.2021.9460059\&partnerID=40\&md5=2f6ca1a0b8790dce2a085d0e0ff10cf4}, author = {Ahajjam, M.A. and Essayeh, C. and Ghogho, M. and Kobbane, A.} } @article {Abdelali2021164282, title = {Multiple hypothesis detection and tracking using deep learning for video traffic surveillance}, journal = {IEEE Access}, volume = {9}, year = {2021}, note = {cited By 3}, pages = {164282-164291}, abstract = {Moroccan Intelligent Transport System is the first Moroccan system that uses the latest advances in computer vision, machine learning and deep learning techniques to manage Moroccan traffic and road violations. In this paper, we propose a fully automatic approach to Multiple Hypothesis Detection and Tracking (MHDT) for video traffic surveillance. The proposed framework combines Kalman filter and data association-based tracking methods using the YOLO detection approach to robustly track vehicles in complex traffic surveillance scenes. Experimental results demonstrate that the proposed approach is robust in detecting and tracking the trajectories of vehicles in different situations such as scale variation, stopped vehicles, rotation, varying illumination and occlusion. The proposed approach shows competitive results (detection: 94.10\% accuracy, tracking: 92.50\% accuracy) compared to state-of-the-art approaches. {\textcopyright} 2021 Institute of Electrical and Electronics Engineers Inc.. All rights reserved.}, keywords = {Bandpass filters, Computer vision, Convolutional neural network, Data association, Deep learning, Deep neural networks, Detection, Intelligent systems, Intelligent vehicle highway systems, Kalman filters, Monitoring, Multiple hypothesis tracking, Object detection, Occlusion handling, Predictive models, Security systems, Target tracking, Targets tracking, Traffic control, Traffic surveillance, Vehicles, Video sequences}, doi = {10.1109/ACCESS.2021.3133529}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85121370415\&doi=10.1109\%2fACCESS.2021.3133529\&partnerID=40\&md5=84b2a2537c16a7db00901d13c8b6eb5c}, author = {Abdelali, H.A.I.T. and Derrouz, H. and Zennayi, Y. and Thami, R.O.H. 
and Bourzeix, F.} } @conference {Boujida2021102, title = {Neural networks based software development effort estimation: A systematic mapping study}, booktitle = {Proceedings of the 16th International Conference on Software Technologies, ICSOFT 2021}, year = {2021}, note = {cited By 0}, pages = {102-110}, abstract = {Developing an efficient model that accurately predicts the development effort of a software project is an important task in software project management. Artificial neural networks (ANNs) are promising for building predictive models since their ability to learn from previous data, adapt and produce more accurate results. In this paper, we conducted a systematic mapping study of papers dealing with the estimation of software development effort based on artificial neural networks. In total, 80 relevant studies were identified between 1993 and 2020 and classified with respect to five criteria: publication source, research approach, contribution type, techniques used in combination with ANN models and type of the neural network used. The results showed that, most ANN-based software development effort estimation (SDEE) studies applied the history-based evaluation (HE) and solution proposal (SP) approaches. Besides, the feedforward neural network was the most frequently used ANN type among SDEE researchers. To improve the performance of ANN models, most papers employed optimization methods such as Genetic Algorithms (GA) and Particle Swarm Optimization (PSO) in combination with ANN models. Copyright {\textcopyright} 2021 by SCITEPRESS {\textendash} Science and Technology Publications, Lda. All rights reserved}, keywords = {Ann models, Feedforward neural networks, Genetic algorithms, Mapping, Optimization method, Particle swarm optimization (PSO), Predictive analytics, Predictive models, Project management, Research approach, Software design, Software development effort, Software project, Software project management, Systematic mapping studies}, doi = {10.5220/0010603701020110}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111770639\&doi=10.5220\%2f0010603701020110\&partnerID=40\&md5=c4592c4b704daf3823eb5e9e3e1e5693}, author = {Boujida, F.E. and Amazal, F.A. and Idri, A.} } @article {Semmouni2021768, title = {A New Electronic Voting System Based on Elliptic Curve in a Cloud Computing Environment}, journal = {Advances in Intelligent Systems and Computing}, volume = {1383 AISC}, year = {2021}, note = {cited By 0}, pages = {768-778}, abstract = {The process of democratization is developing and promoting due to electronic voting. However, the increase of frauds and the increasing amount of attacks launched by hackers, gave birth to privacy and authentication problems. Cryptography offers multiple solutions to overcome the sensitive data protection issues in e-voting. In this paper, we study the application of elliptic curve cryptography, and use the homomorphic encryption properties to present a new electronic voting system. Our new scheme is based on the homomorphic cryptosystem EC-ELGAMAL~[7], and zero knowledge algorithm of Schnorr algorithm for identification and authentication. 
{\textcopyright} 2021, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, keywords = {Authentication, Cloud computing environments, Electronic voting, Electronic voting systems, Elliptic curve cryptography, Geometry, Ho-momorphic encryptions, Homomorphic cryptosystem, Multiple solutions, Pattern recognition, Personal computing, Privacy by design, Public key cryptography, Schnorr algorithm, Sensitive data protections, Soft computing, Voting machines}, doi = {10.1007/978-3-030-73689-7_73}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85105859364\&doi=10.1007\%2f978-3-030-73689-7_73\&partnerID=40\&md5=8bb3fcc911f5436bbd6e39378e1aec4a}, author = {Semmouni, M.C. and Azougaghe, A. and Nitaj, A. and Belkasmi, M.} } @conference {Aouragh2021211, title = {A new estimate of the n-gram language model}, booktitle = {Procedia CIRP}, volume = {189}, year = {2021}, note = {cited By 2}, pages = {211-215}, abstract = {Modeling a natural language aims to build systems that are able to reproduce, correct and more or less predict the structure of a given language; moreover it summarizes the general knowledge related to that language. In this context, we have suggested a new language model which efficiently estimate the n-gram language model. This new model has made it possible to remedy the shortcomings of the n-gram language model. By using an automatic spelling error application, the evaluation of this model has been done, and after the comparison of the results obtained; we see that this new model give quick and precise results. These results show that this new proposed model better models the succession of words within sentences, and can therefore replace the n-gram model in Natural Language Processing (NLP) applications. {\textcopyright} 2021 Elsevier B.V.. All rights reserved.}, keywords = {Arabic languages, Build systems, Computational linguistics, Correction of spelling error, Elsevier, General knowledge, Language modeling, Modeling languages, N-distant-max model, N-gram language models, Natural language processing systems, Natural languages, spelling errors}, doi = {10.1016/j.procs.2021.05.111}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85112416933\&doi=10.1016\%2fj.procs.2021.05.111\&partnerID=40\&md5=b893c0817afa2e87e8bd68f189d60faf}, author = {Aouragh, S.L. and Yousfi, A. and Laaroussi, S. and Gueddah, H. and Nejja, M.} } @article {Bouzbita2021253, title = {A New Hidden Markov Model Approach for Pheromone Level Exponent Adaptation in Ant Colony System}, journal = {Studies in Computational Intelligence}, volume = {906}, year = {2021}, note = {cited By 1}, pages = {253-267}, abstract = {We propose in this paper a Hidden Markov Model~(HMM) approach to avoid premature convergence of ants in the Ant Colony System~(ACS) algorithm. Indeed, the proposed approach was modelled as a classifier method to control the convergence through the dynamic adaptation of the α parameter that weighs the relative influence of the pheromone. The implementation was tested on several Travelling Salesman Problem~(TSP) instances with different number of cities. The proposed approach was compared with the standard ACS and the existing fuzzy logic~in the literature. The experimental results illustrate that the proposed method shows better performance. 
{\textcopyright} 2020, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-030-58930-1_17}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097964733\&doi=10.1007\%2f978-3-030-58930-1_17\&partnerID=40\&md5=220c02fd7287bc58dc79db86db73282b}, author = {Bouzbita, S. and Afia, A.E. and Faizi, R.} } @article {Achakir2021314, title = {Non-Model-Based approach for complete digitization by TLS or mobile scanner}, journal = {ISPRS Journal of Photogrammetry and Remote Sensing}, volume = {178}, year = {2021}, note = {cited By 2}, pages = {314-327}, abstract = {This paper investigates automatic digitization with complete coverage of large and complex environments using a TLS or a mobile scanner. We propose an adaptive multi-objective view-planner that can operate in an unknown environment to provide guidance for the human operator and ease the scanning task or by a mobile robot for an automatic exploration of the environment. The proposed view-planner is adapted to environments where the sensor is operating on a flat surface such as office spaces, urban areas, open fields or in some cultural heritage applications. First, we propose an adaptive gap-based method to extract occluded areas in a point cloud, which is completely automated and does not require extensive computations in a large environment such as ray-tracing or level-set methods. Then, we introduce a novel exploration strategy that uses specific regions of the environment called {\textquotedblleft}Conservative-Cells{\textquotedblright} to drastically reduce the number of sensing positions to achieve complete digitization of the environment. Both methods were validated with simulated and real point clouds. The proposed approach has been applied to a scanner carried by a mobile robot, then to data acquired by a TLS used by a human operator in a large, complex environment. Experimental results on both TLS and mobile robot show that our view-planning approach is effective in finding a sequence of positions that leads to a complete reconstruction of the environment. Moreover, the proposed approach shows efficient performance in terms of coverage rate and computational time compared to others view-planning approaches as well as the results of an experienced human operator in a large, complex environment. {\textcopyright} 2021 International Society for Photogrammetry and Remote Sensing, Inc. (ISPRS)}, keywords = {automation, Complex environments, cultural heritage, Digitisation, digitization, Human operator, instrumentation, Mobile robots, Mobile scanner, Model based approach, multiobjective programming, Next best view, Non-model-based, Numerical methods, Office buildings, Performance assessment, Personnel, Point-clouds, ray tracing, Remote sensing, Robot programming, robotics, Scanning, Seebeck effect, Simulation, urban area, Urban planning, View planning, Visibility analysis}, doi = {10.1016/j.isprsjprs.2021.06.014}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85109077721\&doi=10.1016\%2fj.isprsjprs.2021.06.014\&partnerID=40\&md5=32e85ee602b523c75ec0a6725ad46f50}, author = {Achakir, F. and El Fkihi, S. 
and Mouaddib, E.M.} } @article {Abou-zbiba2021311, title = {A Novel Mobile CrowdSensing Architecture for Road Safety}, journal = {Lecture Notes in Networks and Systems}, volume = {183}, year = {2021}, note = {cited By 0}, pages = {311-324}, abstract = {Intelligent Transportation Systems have become an essential part of today{\textquoteright}s transportation systems as they aim to enhance efficiency, safety and mobility. They rely particularly on various communication and sensing technologies to achieve their objectives. At this level, Mobile CrowdSensing presents a cost-efficient solution and provides interesting features for data collection which is a major component in ITS. However, it still faces some challenges such as lack of incentive mechanisms, data validation, privacy and security. These challenges motivate us to propose a Mobile CrowdSensing architecture for our future SI-CAR (Secure and Intelligent Crowdsensing Application for Road Safety) application that integrates deep learning-based data validation, edge computing-based local processing for data privacy and gamification based-incentive mechanism. {\textcopyright} 2021, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-030-66840-2_24}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85102625921\&doi=10.1007\%2f978-3-030-66840-2_24\&partnerID=40\&md5=625844bcb30cf1f884057c1415e851e2}, author = {Abou-zbiba, W. and El Gadi, H. and El Bakkali, H. and Benbrahim, H. and Benhaddou, D.} } @article {Benamri2021, title = {An overview of genes and mutations associated with Chlamydiae species{\textquoteright} resistance to antibiotics}, journal = {Annals of Clinical Microbiology and Antimicrobials}, volume = {20}, number = {1}, year = {2021}, note = {cited By 4}, abstract = {Background: Chlamydiae are intracellular bacteria that cause various severe diseases in humans and animals. The common treatment for chlamydia infections are antibiotics. However, when antibiotics are misused (overuse or self-medication), this may lead to resistance of a number of chlamydia species, causing a real public health problem worldwide. Materials and methods: In the present work, a comprehensive literature search was conducted in the following databases: PubMed, Google Scholar, Cochrane Library, Science direct and Web of Science. The primary purpose is to analyse a set of data describing the genes and mutations involved in Chlamydiae resistance to antibiotic mechanisms. In addition, we proceeded to a filtration process among 704 retrieved articles, then finished by focusing on 24 studies to extract data that met our requirements. Results: The present study revealed that Chlamydia trachomatis may develop resistance to macrolides via mutations in the 23S rRNA, rplD, rplV genes, to rifamycins via mutations in the rpoB gene, to fluoroquinolones via mutations in the gyrA, parC and ygeD genes, to tetracyclines via mutations in the rpoB gene, to fosfomycin via mutations in the murA gene, to MDQA via mutations in the secY gene. Whereas, Chlamydia pneumoniae may develop resistance to rifamycins via mutations in the rpoB gene, to fluoroquinolones via mutations in the gyrA gene. Furthermore, the extracted data revealed that Chlamydia psittaci may develop resistance to aminoglycosides via mutations in the 16S rRNA and rpoB genes, to macrolides via mutations in the 23S rRNA gene. Moreover, Chlamydia suis can become resistance to tetracyclines via mutations in the tet(C) gene. 
In addition, Chlamydia caviae may develop resistance to macrolides via variations in the 23S rRNA gene. The associated mechanisms of resistance are generally the inhibition of bacterial protein synthesis, the inhibition of bacterial enzymes{\textquoteright} action and the inhibition of the bacterial transcription process. Conclusion: This literature review revealed the existence of diverse mutations associated with resistance to antibiotics using molecular tools and targeting chlamydia species{\textquoteright} genes. Furthermore, these mutations were shown to be associated with different mechanisms that led to resistance. In that regard, more mutations and information could be revealed by a deeper investigation using whole genome sequencing. Certainly, this can help improve the handling of chlamydia infections and healthcare by decreasing disease complications and medical costs. {\textcopyright} 2021, The Author(s).}, keywords = {16S, 23S, aminoglycoside antibiotic agent, Anti-Bacterial Agents, antibiotic agent, antibiotic resistance, antiinfective agent, bacterial gene, bacterial genetics, bacterial protein, Chlamydia, Chlamydia caviae, Chlamydia Infections, Chlamydia pneumoniae, Chlamydia psittaci, Chlamydia suis, Chlamydia trachomatis, chlamydiasis, drug effect, enzyme inhibition, Fluoroquinolones, gene mutation, genetics, gyrA gene, health care cost, health care system, human, Humans, isolation and purification, macrolide, Macrolides, murA gene, mutation, nonhuman, parC gene, protein synthesis, quinoline derived antiinfective agent, quinolone derivative, Review, Ribosomal, rifamycin, Rifamycins, RNA, RNA 16S, RNA 23S, rplD gene, rplV gene, rpoB gene, secY gene, tetracycline, tetracycline derivative, Tetracyclines, whole genome sequencing, ygeD gene}, doi = {10.1186/s12941-021-00465-4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85114291851\&doi=10.1186\%2fs12941-021-00465-4\&partnerID=40\&md5=47741d00acddab5c642ac657b207e205}, author = {Benamri, I. and Azzouzi, M. and Sanak, K. and Moussa, A. and Radouani, F.} } @article {Badarneh2021926, title = {Performance Analysis of FSO Communications over F Turbulence Channels with Pointing Errors}, journal = {IEEE Communications Letters}, volume = {25}, number = {3}, year = {2021}, note = {cited By 21}, pages = {926-930}, abstract = {Recently, the Fisher-Snedecor F distribution was proposed to model the turbulence in free-space optical (FSO) communications. However, the existing model does not consider pointing error impairment. To fill this gap, in this letter, we derive novel closed-form expressions for the probability density function (PDF) and cumulative distribution function (CDF) for irradiance fluctuations in the presence of pointing error impairments. Subsequently, the PDF and CDF of the received signal-to-noise ratio (SNR) are derived and employed to obtain novel closed-form expressions for the outage probability, average bit error rate, and average ergodic capacity. To gain more insight into the impact of system and turbulence channel parameters, simple and accurate asymptotic expressions are further derived. Our analytical results are supported by Monte-Carlo simulations to validate the analysis. 
{\textcopyright} 1997-2012 IEEE.}, keywords = {Asymptotic expressions, Average bit-error rates, Average ergodic capacities, Bit error rate, Closed-form expression, Cumulative distribution function, Distribution functions, Errors, Free Space Optical communication, Monte Carlo methods, Optical communication, Performance analysis, Probability density function, Probability density function (PDF), Signal to noise ratio, Turbulence}, doi = {10.1109/LCOMM.2020.3042489}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097943147\&doi=10.1109\%2fLCOMM.2020.3042489\&partnerID=40\&md5=d5175f79c64955ba088a76dbd3e88c91}, author = {Badarneh, O.S. and Derbas, R. and Almehmadi, F.S. and El Bouanani, F. and Muhaidat, S.} } @article {Illi202190, title = {Physical Layer Security of a Dual-Hop Regenerative Mixed RF/UOW System}, journal = {IEEE Transactions on Sustainable Computing}, volume = {6}, number = {1}, year = {2021}, note = {cited By 15}, pages = {90-104}, abstract = {Ensuring physical layer security is a crucial task in conventional and emerging communication systems, which are typically characterized by stringent quality of service and security requirements. This also accounts for wireless technologies in the context of the Internet of Things paradigm, which are expected to exhibit considerably increased computational complexity. Based on this, the present contribution investigates the secrecy outage performance of a dual-hop decode-and-forward (DF) mixed radio-frequency/underwater optical wireless communication (RF/UOWC) system. Such wireless network configurations are particularly useful in efficient and demanding scenarios, such as military communications. Therefore, our analysis considers one single-antenna source node (S) communicating with one legitimate destination node (D) via a DF relay node (R) equipped with multiple antennas for reception. Particularly, the relay receives the incoming signal from S via an RF link, applies selection-combining (SC) technique, fully decodes it, re-encodes it, and then forwards it to the destination via a UOWC link. The communication is performed under the eavesdropper{\textquoteright}s attempt to intercept the S-R hop (RF side). In this context, a closed-form expression for the secrecy outage probability is derived along with a thorough asymptotic analysis in the high SNR regime, based on which the achievable diversity order is provided. The offered results provide useful insights on the impact of some key system and channel parameters on the secrecy outage performance, such as the number of eavesdroppers, the number of relay antennas, fading severity parameters of RF links, and water turbulence severity of the UOWC link. The conducted analysis shows that the secrecy outage probability is dominated only by the R-D link in the high SNR regime, regardless of the S-R parameters, such as the number of relay antennas and the average SNR at the relay branches. The offered analytic results are corroborated with respective results from computer simulations. Since these parameters are closely related with the computational complexity at the involved terminals, the offered insights are useful for the design and computationally sustainable operation of such systems. 
{\textcopyright} 2016 IEEE.}, keywords = {Antenna sources, Antennas, Asymptotic analysis, Complex networks, Computational complexity, Cooperative communication, Decoding, Dual-hop relaying, Fading channels, Military communications, Network layers, Optical communication, Performances analysis, Physical layer security, Probability, Quality of service, Secrecy outage probabilities, Secrecy outages, Selection combining, Signal to noise ratio, Single antenna, Source nodes, Underwater optical wireless communication}, doi = {10.1109/TSUSC.2019.2906545}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85098369882\&doi=10.1109\%2fTSUSC.2019.2906545\&partnerID=40\&md5=3021a37f73c102416a7e51dd6c971930}, author = {Illi, E. and Bouanani, F.E. and Da Costa, D.B. and Sofotasios, P.C. and Ayoub, F. and Mezher, K. and Muhaidat, S.} } @article {Bourja2021915, title = {Real Time Vehicle Detection, Tracking, and Inter-vehicle Distance Estimation based on Stereovision and Deep Learning using Yolov3}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {12}, number = {8}, year = {2021}, note = {cited By 1}, pages = {915-923}, abstract = {In this paper, we propose a robust real-time vehicle tracking and inter-vehicle distance estimation algorithm based on stereovision. Traffic images are captured by a stereoscopic system installed on the road, and then we detect moving vehicles with the YOLO V3 Deep Neural Network algorithm. Thus, the real-time video goes through an algorithm for stereoscopy-based measurement in order to estimate the distance between detected vehicles. However, detecting objects in real time has always been a challenging task because of occlusion, scale, illumination, etc. Thus, many convolutional neural network models for object detection were developed in recent years, but they cannot be used for real-time object analysis because of their slow recognition speed. The model that currently performs best is the unified object detection model You Only Look Once (YOLO). But in our experiment, we have found that despite having very good detection precision, YOLO still has some limitations. YOLO processes every image separately, even in a continuous video or sequence of frames. Because of this, much important identification information can be lost. So, after vehicle detection and tracking, inter-vehicle distance estimation is performed. {\textcopyright} 2021. International Journal of Advanced Computer Science and Applications. All Rights Reserved.}, keywords = {Bounding-box, Convolution, Convolutional neural network, Convolutional neural networks, Deep neural networks, Distance estimation, Estimation algorithm, Object detection, Object recognition, Real- time, Stereo image processing, Stereoimages, Stereovision, Tracking, Vehicles, Vehicles detection, YOLOv3 deep neural network}, doi = {10.14569/IJACSA.2021.01208101}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85118990281\&doi=10.14569\%2fIJACSA.2021.01208101\&partnerID=40\&md5=225926d9445a4fed55a125a55e519b2f}, author = {Bourja, O. and Derrouz, H. and Abdelali, H.A. and Maach, A. and Thami, R.O.H. 
and Bourzeix, F.} } @conference {Remli2021328, title = {Reference Architecture for Efficient Computer Integrated Manufacturing}, booktitle = {International Conference on Enterprise Information Systems, ICEIS - Proceedings}, volume = {1}, year = {2021}, note = {cited By 0}, pages = {328-334}, abstract = {The technological progress combined with the rapidly changing customer demands is pushing for continuous changes in manufacturing environments. This led industrial companies into seeking the optimization of their processes through Computer Integrated Manufacturing (CIM). The main purpose of the latter is to link the shop floor systems to the high business layer ones. Based on a literature review that we have conducted earlier on CIM architectures, we have identified the different aspects related to CIM and detected the limitations of the existing approaches. With the aim of overcoming these limitations, we present in this paper a reference architecture for CIM based on the ISA-95 standard. We also explain how the proposed architecture was applied on a case study from the automotive industry. Copyright {\textcopyright} 2021 by SCITEPRESS {\textendash} Science and Technology Publications, Lda. All rights reserved.}, keywords = {Automotive industry, Computer integrated manufacturing, Computer-integrated manufacturing, Customer demands, Industrial companies, Manufacturing environments, Optimisations, Reference architecture, Shopfloors, Smart manufacturing, Systems architecture, Technological progress}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85137946996\&partnerID=40\&md5=3dc1bfa95e09b3a460540e7f31304823}, author = {Remli, A. and Khtira, A. and El Asri, B.} } @conference {ElAkrouchi2021, title = {Review on adopting concept extraction in weak signals detection in competitive intelligence}, booktitle = {ACM International Conference Proceeding Series}, year = {2021}, note = {cited By 0}, abstract = {The dynamic nature of competition in the business environment makes a company{\textquoteright}s ability to secure future change more critical to its survival. Consequently, efficient exploitation of valuable intel is globally acknowledged as an essential foundation of competitive advantage, leading to Competitive Intelligence. Besides, one of the crucial keys to successfully securing competitive information is studying the future. Thus, predicting what may happen in the uncertain future relies on leading-edge technology, leading to an extensive need for foresight analysis. Foresight study uses various methods to recognize future developments and make plans that anticipate possible future changes. One of the leading techniques used in foresight is detecting and understanding Weak Signals. But given the nature of these signals, automatically scanning for them is still considered a difficult task. For this, we examine the Concept Extraction technique as a main step towards detecting weak signals from documents automatically. In this paper, we explain the concept extraction methods used so far and present in detail the main methods and approaches and their application in detecting weak signals. 
{\textcopyright} 2021 Association for Computing Machinery.}, keywords = {Business environments, Competition, Competitive advantage, Competitive intelligence, Concept extraction, Data mining, Dynamic nature, Extraction, Leading edge technology, Possible futures, Signal detection, Topic Modeling, Weak signal detection, Weak signals}, doi = {10.1145/3485557.3485560}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85121505750\&doi=10.1145\%2f3485557.3485560\&partnerID=40\&md5=01161f382ecb38a8faf2883a2bc37003}, author = {El Akrouchi, M. and Benbrahim, H. and Kassou, I.} } @article {ElHaddaoui2021336, title = {Sentiment analysis: A review and framework foundations}, journal = {International Journal of Data Analysis Techniques and Strategies}, volume = {13}, number = {4}, year = {2021}, note = {cited By 0}, pages = {336-355}, abstract = {The rise of social media as a platform for opinion expression and social interactions motivated the need for an automated data analysis technique for business value extraction with optimal investment considerations. In this respect, sentiment analysis (SA) becomes the de facto approach to investigate generated data and retrieve information such as sentiments and emotions, discussed topics, etc., via traditional machine learning and modern neural network-based algorithms. The current techniques achieve reasonable accuracy scores, but their performance depends on the context of application, and most implementations are complex, non-reusable components. Our literature review shows a lack of research studies unifying existing systems under a common framework for SA tasks. This paper also highlights the trending movement towards neural network approaches and pinpoints recent research studies for SA sub-tasks. An SA framework design proposition is presented based on key research projects and enhanced with other promising works. {\textcopyright} 2021 Inderscience Publishers. All rights reserved.}, doi = {10.1504/IJDATS.2021.120112}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85122875189\&doi=10.1504\%2fIJDATS.2021.120112\&partnerID=40\&md5=e3d2033b898f101c2ec5d087fdf6472e}, author = {El Haddaoui, B. and Chiheb, R. and Faizi, R. and El Afia, A.} } @article {Abid20211463, title = {Simulation optimisation methods applied in reverse logistics: a systematic review}, journal = {International Journal of Sustainable Engineering}, volume = {14}, number = {6}, year = {2021}, note = {cited By 0}, pages = {1463-1483}, abstract = {Reverse logistics has grown in importance with the increase in environmental sustainability issues. However, reverse logistics is often associated with complex and difficult uncertainty issues, the vagueness of information, and the numerous decision variables and constraints. To address these issues, researchers deployed a class of effective methods called simulation-based optimisation. The aim of this study is to perform a systematic review of the available reverse logistics literature, discussing reverse logistics and simulation-based optimisation, highlighting the research gap and setting future directions. This work identifies: (1) the various research designs and methodologies used in reverse logistics literature; (2) the most explored areas of reverse logistics and discusses the sector where the exploration can be redirected; (3) the various simulation-based optimisation (SO) methods; and (4) the most dominant SO methods for solving reverse logistics problems. 
This study will be useful to researchers and practitioners in finding new research opportunities in the field of reverse logistics. {\textcopyright} 2021 Informa UK Limited, trading as Taylor \& Francis Group.}, keywords = {Decision constraints, Decision variables, Environmental sustainability, Optimization, Optimization method, Reverse logistics, Simulation optimization method, Simulation-based optimizations, Sustainability issues, Sustainable development, Systematic Review, Uncertainty}, doi = {10.1080/19397038.2021.2003470}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85119952129\&doi=10.1080\%2f19397038.2021.2003470\&partnerID=40\&md5=dd9095deb5eaf7d2eb4fffd798e2e34d}, author = {Abid, S. and Mhada, F.Z.} } @conference {Chaayra2021, title = {Statistical Analysis of Uplink Massive MIMO Systems for MRC Linear Receivers over Weibull Fading Channels}, booktitle = {Proceedings - 4th International Conference on Advanced Communication Technologies and Networking, CommNet 2021}, year = {2021}, note = {cited By 0}, abstract = {This paper investigates the performance of maximum-ratio combining (MRC) linear receivers in a massive multiple-input multiple-output (mMIMO) uplink communication system, that in terms of their signal-to-interference-plus-noise ratio (SINR) operating under independent flat Weibull multipath fading channels (WFCs). Based on a tight approximate probability density function (PDF) expression of the signal-to-noise ratio at the considered receiver output, we derive new accurate closed-form expressions of PDF, outage probability (OP) for mMIMO employing MRC technique. The results show high accuracy for significant values of K{\texttimes}Nr mMIMO system at high/low transmission power and severity fading parameters as well. Indeed, the greater K{\texttimes}Nr, the better the PDF{\textquoteright}s accuracy, therefore, the better is the OP. Numerical outcomes have been assessed by using Mathematica Software to show up our results. {\textcopyright} 2021 IEEE.}, keywords = {Cummulative density function, G-functions, Linear receiver, Massive MIMO, Maximum ratio, Maximum-ratio-combining, Meije G-function, MIMO systems, Multipath fading, Multipath propagation, Outage probability, Probability density function, Signal interference, Signal receivers, Signal to noise ratio, Signalto-interference-plus-noise ratios (SINR), Weibull distribution, Weibull fading channel}, doi = {10.1109/CommNet52204.2021.9641935}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85123984751\&doi=10.1109\%2fCommNet52204.2021.9641935\&partnerID=40\&md5=3e44ae3af40139cd08615a702accf1ac}, author = {Chaayra, T. and El Ansari, Y. and El Bouanani, F. and Ben-Azza, H.} } @article {Hosni20212827, title = {A systematic mapping study for ensemble classification methods in cardiovascular disease}, journal = {Artificial Intelligence Review}, volume = {54}, number = {4}, year = {2021}, note = {cited By 5}, pages = {2827-2861}, abstract = {Ensemble methods overcome the limitations of single machine learning techniques by combining different techniques, and are employed in the quest to achieve a high level of accuracy. This approach has been investigated in various fields, one of them being that of bioinformatics. One of the most frequent applications of ensemble techniques involves research into cardiovascular diseases, which are considered the leading cause of death worldwide. 
The purpose of this research work is to identify the papers that investigate ensemble classification techniques applied to cardiology diseases, and to analyse them according to nine aspects: their publication venues, the medical tasks tackled, the empirical and research types adopted, the types of ensembles proposed, the single techniques used to construct the ensembles, the validation frameworks adopted to evaluate the proposed ensembles, the tools used to build the ensembles, and the optimization methods employed for the single techniques. This paper reports the carrying out of a systematic mapping study. An extensive automatic search in four digital libraries: IEEE Xplore, ACM Digital Library, PubMed, and Scopus, followed by a study selection process, resulted in the identification of 351 papers that were used to address our mapping questions. This study found that the papers selected had been published in a large number of different resources. The medical task addressed most frequently by the selected studies was diagnosis. In addition, the experiment-based empirical type and evaluation-based research type were the most dominant approaches adopted by the selected studies. Homogeneous ensembles were the ensemble type that was developed most often in literature, while decision trees, artificial neural networks and Bayesian classifiers were the single techniques used most frequently to develop ensemble classification methods. The weighted majority and majority voting rules were adopted to obtain the final decision of the ensembles developed. With regard to evaluation frameworks, the datasets obtained from the UCI and PhysioBank repositories were those used most often to evaluate the ensemble methods, while the k-fold cross-validation method was the most frequently-employed validation technique. Several tools with which to build ensemble classifiers were identified, and the type of software adopted with the greatest frequency was open source. Finally, only a few researchers took into account the optimization of the parameter settings of either single or meta ensemble classifiers. This mapping study attempts to provide a greater insight into the application of ensemble classification methods in cardiovascular diseases. The majority of the selected papers reported positive feedback as regards the ability of ensemble methods to perform better than single methods. Further analysis is required to aggregate the evidence reported in literature. {\textcopyright} 2020, Springer Nature B.V.}, keywords = {Bayesian networks, Cardio-vascular disease, Cardiology, Decision trees, Diagnosis, Digital libraries, Diseases, Ensemble classification, Ensemble classifiers, Evaluation framework, K fold cross validations, Learning systems, Majority voting rules, Mapping, Open source software, Open systems, Optimization method, Systematic mapping studies}, doi = {10.1007/s10462-020-09914-6}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85091735819\&doi=10.1007\%2fs10462-020-09914-6\&partnerID=40\&md5=69ea4b02de420c3ec6a85e1f3c7dddaf}, author = {Hosni, M. and Carrillo de Gea, J.M. and Idri, A. and El Bajta, M. and Fern{\'a}ndez Alem{\'a}n, J.L. and Garc{\'\i}a-Mateos, G. 
and Abnane, I.} } @conference {Lahrichi2021322, title = {Toward a multimodal multitask model for neurodegenerative diseases diagnosis and progression prediction}, booktitle = {Proceedings of the 10th International Conference on Data Science, Technology and Applications, DATA 2021}, year = {2021}, note = {cited By 1}, pages = {322-328}, abstract = {Recent studies on modelling the progression of Alzheimer{\textquoteright}s disease use a single modality for their predictions while ignoring the time dimension. However, the nature of patient data is heterogeneous and time dependent which requires models that value these factors in order to achieve a reliable diagnosis, as well as making it possible to track and detect changes in the progression of patients{\textquoteright} condition at an early stage. This article overviews various categories of models used for Alzheimer{\textquoteright}s disease prediction with their respective learning methods, by establishing a comparative study of early prediction and detection Alzheimer{\textquoteright}s disease progression. Finally, a robust and precise detection model is proposed. Copyright {\textcopyright} 2021 by SCITEPRESS - Science and Technology Publications, Lda. All rights reserved}, keywords = {Alzheimer{\textquoteright}s disease, Comparative studies, Data Science, Detection models, Diagnosis, Early prediction, Forecasting, Hospital data processing, Learning methods, Learning systems, Multi-task model, Neurodegenerative diseases, Patients{\textquoteright} conditions, Time dependent}, doi = {10.5220/0010600003220328}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111735908\&doi=10.5220\%2f0010600003220328\&partnerID=40\&md5=1f255250606020cc06dfdc4c73cc29c0}, author = {Lahrichi, S. and Rhanoui, M. and Mikram, M. and Asri, B.E.} } @conference {Trabelsi2021341, title = {Towards an Approach of Recommendation in Business Processes Using Decision Trees}, booktitle = {Proceedings - 2021 International Symposium on Computer Science and Intelligent Controls, ISCSIC 2021}, year = {2021}, note = {cited By 0}, pages = {341-347}, abstract = {A recommender system analyses users{\textquoteright} data in order to extract their interests and preferences and suggest them relevant items. The recommendation systems have shown their applicability in many domains, especially in business processes (BP). Business processes are defined as a set of tasks that are performed by an organization to achieve a business goal. Using recommendation techniques in business processes consists of proposing relevant tasks at a certain point, which helps managers making the right decisions. In this paper, we propose an approach of recommending in BPMN-based business processes. The recommendation technique that we considered in this approach is the decision trees. {\textcopyright} 2021 IEEE.}, keywords = {BPMN model, Business goals, Business Process, Decision trees, Forestry, Machine learning, Machine-learning, Recommendation techniques, Recommender Systems, User data}, doi = {10.1109/ISCSIC54682.2021.00068}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85124144617\&doi=10.1109\%2fISCSIC54682.2021.00068\&partnerID=40\&md5=aa31af21d6c3ad066ee71e9e9456ad84}, author = {Trabelsi, F.Z. and Khtira, A. 
and El Asri, B.} } @article {Hosni2021, title = {On the value of filter feature selection techniques in homogeneous ensembles effort estimation}, journal = {Journal of Software: Evolution and Process}, volume = {33}, number = {6}, year = {2021}, note = {cited By 1}, abstract = {Software development effort estimation (SDEE) remains as the principal activity in software project management planning. Over the past four decades, several methods have been proposed to estimate the effort required to develop a software system, including more recently machine learning (ML) techniques. Because ML performance accuracy depends on the features that feed the ML technique, selecting the appropriate features in the preprocessing data step is important. This paper investigates three filter feature selection techniques to check the predictive capability of four single ML techniques: K-nearest neighbor, support vector regression, multilayer perceptron, and decision trees and their homogeneous ensembles over six well-known datasets. Furthermore, the single and ensembles techniques were optimized using the grid search optimization method. The results suggest that the three filter feature selection techniques investigated improve the reasonability and the accuracy performance of the four single techniques. Moreover, the homogeneous ensembles are statistically more accurate than the single techniques. Finally, adopting a random process (i.e., random subspace method) to select the inputs feature for ML technique is not always effective to generate an accurate homogeneous ensemble. {\textcopyright} 2021 John Wiley \& Sons, Ltd.}, keywords = {Decision trees, Effort Estimation, Feature extraction, K-nearest neighbors, Multilayer neural networks, Nearest neighbor search, Object oriented programming, Predictive capabilities, Project management, Random processes, Random subspace method, Selection techniques, Software design, Software development effort, Software project management, Software systems, Support vector regression}, doi = {10.1002/smr.2343}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85103402056\&doi=10.1002\%2fsmr.2343\&partnerID=40\&md5=0f4c04b547f3d628d6db8d65b74912e5}, author = {Hosni, M. and Idri, A. and Abran, A.} } @article {AitAbdelali2021517, title = {Visual Vehicle Tracking via Deep Learning and Particle Filter}, journal = {Advances in Intelligent Systems and Computing}, volume = {1188}, year = {2021}, note = {cited By 2}, pages = {517-526}, abstract = {Visual vehicle tracking is one of the most challenging research topics in computer vision. In this paper, we propose a novel and efficient approach based on the particle filter technique and deep learning for multiple vehicle tracking, where the main focus is to associate vehicles efficiently for online and real-time applications. Experimental results illustrate the effectiveness of the system we are proposing. {\textcopyright} 2021, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd.}, keywords = {Deep learning, Monte Carlo methods, Particle filter, Real-time application, Research topics, Soft computing, Vehicles}, doi = {10.1007/978-981-15-6048-4_45}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85096559933\&doi=10.1007\%2f978-981-15-6048-4_45\&partnerID=40\&md5=51a9b5456004a8cdb4bc0a5256e17dc9}, author = {Ait Abdelali, H. and Bourja, O. and Haouari, R. and Derrouz, H. and Zennayi, Y. and Bourzex, F. 
and Oulad Haj Thami, R.} } @article {Haitam202013, title = {Advanced modified direct torque control with space vector modulation based on active disturbance rejection control for induction motor sensorless drive}, journal = {International Journal of Mechanical Engineering and Robotics Research}, volume = {9}, number = {1}, year = {2020}, note = {cited By 2}, pages = {13-19}, abstract = {This work addresses the problems inherent in the disturbances affecting the operation of electrical motor drives. A modified direct torque control, which is among the best methods of torque control of an induction motor as it provides decoupled control of flux and torque, is implemented with a novel active disturbance rejection controller to cancel the drawbacks of flux ripples, high torque at start-up and variable switching frequency associated with the classical DTC. On the other side, an advanced rotor speed estimator is used in order to address the sensor problem. The proposed control strategy, implementation data, and simulations with MDTC are presented and discussed. It is concluded that the proposed MDTC-SVM control topology offers high performance in steady-state operation despite the existence of internal and external disturbances. {\textcopyright} 2020 by the authors.}, doi = {10.18178/ijmerr.9.1.13-19}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85081411340\&doi=10.18178\%2fijmerr.9.1.13-19\&partnerID=40\&md5=c5c6e74c1aa49a6e9a869fb6152a5a1d}, author = {Haitam, C. and Ahmed, E. and Tamou, N.} } @article {Zarnoufi2020182, title = {AI to prevent cyber-violence: Harmful behaviour detection in social media}, journal = {International Journal of High Performance Systems Architecture}, volume = {9}, number = {4}, year = {2020}, note = {cited By 1}, pages = {182-191}, abstract = {Social media has allowed people to communicate freely. This total freedom has led to the emergence of cyber-violence with a growing number of victims. Much research in psychology and e-health has been conducted to detect the act of cyber-violence. In the computational field, most works have focused on multiple aspects of cyber-violence, but none of them, to our knowledge, have studied the perpetrator{\textquoteright}s harmful behaviour from an emotional dimension. Our goal in this work is to discover the relationship between the emotional state of social media users and their harmful behaviour while engaged in the act of cyber-violence. Our approach is based on Ensemble Machine Learning and engineered features related to Plutchik{\textquoteright}s wheel of basic emotions extracted with semantic similarity and word embedding. The results show a significant association between the individual{\textquoteright}s emotional state and the harmful intent, which may be a good indicator for cyber-violence detection. Copyright {\textcopyright} 2020 Inderscience Enterprises Ltd.}, keywords = {Basic emotions, Behaviour detections, Computational field, Emotional dimensions, Emotional state, Semantic similarity, Semantics, social media, Social networking (online), Violence detections}, doi = {10.1504/IJHPSA.2020.113679}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85102891888\&doi=10.1504\%2fIJHPSA.2020.113679\&partnerID=40\&md5=3e11e2e6a4032f608b4b71f2d426dbca}, author = {Zarnoufi, R. and Boutbi, M. 
and Abik, M.} } @article {Hakkoum202015, title = {Artificial Neural Networks Interpretation Using LIME for Breast Cancer Diagnosis}, journal = {Advances in Intelligent Systems and Computing}, volume = {1161 AISC}, year = {2020}, note = {cited By 6}, pages = {15-24}, abstract = {Breast Cancer (BC) is the most common type of cancer among women. Thankfully early detection and treatment improvements helped decrease its number of deaths. Data Mining techniques (DM), which discover hidden and potentially useful patterns from data, particularly for breast cancer diagnosis, are witnessing a new era, where the main objective is no longer replacing humans or just assisting them in their tasks but enhancing and augmenting their capabilities and this is where interpretability comes into play. This paper aims to investigate the Local Interpretable Model-agnostic Explanations (LIME) technique to interpret a Multilayer perceptron (MLP) trained on the Wisconsin Original Data-set. The results show that LIME explanations are a sort of real-time interpretation that helps understanding how the constructed neural network {\textquotedblleft}thinks{\textquotedblright} and thus can increase trust and help oncologists, as the domain experts, learn new patterns. {\textcopyright} 2020, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, keywords = {Breast Cancer, Breast cancer diagnosis, Data mining, Diseases, Domain experts, Information systems, Information use, Interpretability, Lime, Multi layer perceptron, Neural networks, Real time, Useful patterns, WISCONSIN}, doi = {10.1007/978-3-030-45697-9_2}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085506923\&doi=10.1007\%2f978-3-030-45697-9_2\&partnerID=40\&md5=cf809f0c6b9c95cd37d48038f1dfa79a}, author = {Hakkoum, H. and Idri, A. and Abnane, I.} } @article {Idri20201239, title = {Assessing the impact of parameters tuning in ensemble based breast Cancer classification}, journal = {Health and Technology}, volume = {10}, number = {5}, year = {2020}, note = {cited By 12}, pages = {1239-1255}, abstract = {Breast cancer is one of the major causes of death among women. Different decision support systems were proposed to assist oncologists to accurately diagnose their patients. These decision support systems mainly used classification techniques to categorize the diagnosis into Malign or Benign tumors. Given that no consensus has been reached on the classifier that can perform best in all circumstances, ensemble-based classification, which classifies patients by combining more than one single classification technique, has recently been investigated. In this paper, heterogeneous ensembles based on three well-known machine learning techniques (support vector machines, multilayer perceptron, and decision trees) were developed and evaluated by investigating the impact of parameter values of the ensemble members on classification performance. In particular, we investigate three parameters tuning techniques: Grid Search (GS), Particle Swarm Optimization (PSO) and the default parameters of the Weka Tool to evaluate whether setting ensemble parameters permits more accurate classification in breast cancer over four datasets obtained from the Machine Learning repository. The heterogeneous ensembles of this study were built using the majority voting technique as a combination rule. 
The overall results obtained suggest that: (1) Using GS or PSO techniques for single techniques provides more accurate classification; (2) In general, ensembles generate more accurate classification than their single techniques regardless of the optimization techniques used; (3) Heterogeneous ensembles based on optimized single classifiers generate better results than the Uniform Configuration of Weka (UC-WEKA) ensembles; and (4) PSO and GS have roughly the same impact on the performance of ensembles. {\textcopyright} 2020, IUPESM and Springer-Verlag GmbH Germany, part of Springer Nature.}, keywords = {accuracy, Article, Breast Cancer, cancer classification, classifier, decision tree, experimental design, grid search, human, multilayer perceptron, particle swarm optimization, recall, support vector machine}, doi = {10.1007/s12553-020-00453-2}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85087361107\&doi=10.1007\%2fs12553-020-00453-2\&partnerID=40\&md5=7398903f3007d71e535b12c2ef9a90a6}, author = {Idri, A. and Bouchra, E.O. and Hosni, M. and Abnane, I.} } @article {Bouzbita20205436, title = {The behaviour of ACS-TSP algorithm when adapting both pheromone parameters using fuzzy logic controller}, journal = {International Journal of Electrical and Computer Engineering}, volume = {10}, number = {5}, year = {2020}, note = {cited By 4}, pages = {5436-5444}, abstract = {In this paper, an evolved ant colony system (ACS) is proposed by dynamically adapting the parameters responsible for the decay of the pheromone trails, ξ and ρ, using a fuzzy logic controller (FLC) applied to the travelling salesman problem (TSP). The purpose of the proposed method is to understand the effect of both parameters ξ and ρ on the performance of the ACS at the level of solution quality and convergence speed towards the best solutions, through studying the behaviour of the ACS algorithm during this adaptation. The adaptive ACS is compared with the standard one. Computational results show that the adaptive ACS with dynamic adaptation of the local pheromone parameter ξ is more effective compared to the standard ACS. {\textcopyright} 2020 Institute of Advanced Engineering and Science. All rights reserved.}, doi = {10.11591/IJECE.V10I5.PP5436-5444}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85087312967\&doi=10.11591\%2fIJECE.V10I5.PP5436-5444\&partnerID=40\&md5=fb268b2912214f5d902a8ec0f867a2cd}, author = {Bouzbita, S. and El Afia, A. and Faizi, R.} } @conference {Abbal2020, title = {Bi-level multi-capacitated facility location problem}, booktitle = {Proceedings - 2020 5th International Conference on Logistics Operations Management, GOL 2020}, year = {2020}, note = {cited By 1}, abstract = {Facility location problems can be considered as a century-old science. Studies on this subject began in the 1950s and are still relevant today. In 2016, a new variant of the location problems, called {\textquoteright}budget constraint multi-capacitated location problem{\textquoteright} (BMCLP), was published to generalize the p-median one. In this work, we propose a generalization of the BMCLP into a bilevel location problem (plants-depots and depots-customers) in order to consider the modern supply chain constraints.
We therefore established a mathematical formulation for the new problem, named {\textquoteright}bi-level multi-capacitated facility location problem{\textquoteright} and we created data instances adapted to this new formulation based on semi-random factors and certain generation methods available in the literature. In order to validate the latter formulation, we proposed Branch Cut solving method used by CPLEX solver. Finally, we present the obtained computational results, which prove the efficiency of the new formulation for the tested instances. {\textcopyright} 2020 IEEE.}, keywords = {Budget constraint, Budget control, Capacitated facility location problems, Capacitated location, Computational results, Facility location problem, Generation method, Location, Location problems, Mathematical formulation, Supply chains}, doi = {10.1109/GOL49479.2020.9314710}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85100167979\&doi=10.1109\%2fGOL49479.2020.9314710\&partnerID=40\&md5=2a6d370ea7da84d66da95771f24a1900}, author = {Abbal, K. and Benadada, Y. and Mohammed, E.A.} } @article {Zennou2020360, title = {Boosting Sequential Consistency Checking Using Saturation}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {12302 LNCS}, year = {2020}, note = {cited By 1}, pages = {360-376}, abstract = {We address the problem of checking that an execution of a shared memory concurrent program is sequentially consistent (SC). This problem is NP-hard due to the necessity of finding a total order between the write operations that induces an acyclic happen-before relation. We propose an approach allowing to avoid falling systematically in the worst case, and to check SCness in polynomial-time in most cases in practice. The approach is based on a simple yet powerful saturation-based procedure for computing write constraints that must hold for SCness, allowing on one hand fast detection of SC violations, and on the other hand reducing drastically the search space for a total order witnessing SCness. {\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {Concurrent program, Fast detections, NP-hard, Polynomial approximation, Polynomial-time, Search spaces, Sequential consistency, Shared memory, Total order, Write operations}, doi = {10.1007/978-3-030-59152-6_20}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85093860805\&doi=10.1007\%2f978-3-030-59152-6_20\&partnerID=40\&md5=a60ea8b0396b220e92dc8ef992c37f30}, author = {Zennou, R. and Atig, M.F. and Biswas, R. and Bouajjani, A. and Enea, C. and Erradi, M.} } @article {Chlioui202061, title = {Comparing Statistical and Machine Learning Imputation Techniques in Breast Cancer Classification}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {12252 LNCS}, year = {2020}, note = {cited By 1}, pages = {61-76}, abstract = {Missing data imputation is an important task when dealing with crucial data that cannot be discarded such as medical data. This study evaluates and compares the impacts of two statistical and two machine learning imputation techniques when classifying breast cancer patients, using several evaluation metrics. Mean, Expectation-Maximization (EM), Support Vector Regression (SVR) and K-Nearest Neighbor (KNN) were applied to impute 18\% of missing data missed completely at random in the two Wisconsin datasets. 
Thereafter, we empirically evaluated these four imputation techniques when using five classifiers: decision tree (C4.5), Case Based Reasoning (CBR), Random Forest (RF), Support Vector Machine (SVM) and Multi-Layer Perceptron (MLP). In total, 1380 experiments were conducted and the findings confirmed that classification using imputation based machine learning outperformed classification using statistical imputation. Moreover, our experiment showed that SVR was the best imputation method for breast cancer classification. {\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {Breast cancer classifications, Case based reasoning, Casebased reasonings (CBR), Decision trees, Diseases, Expectation Maximization, Imputation techniques, K nearest neighbor (KNN), Learning systems, Maximum principle, Missing data imputations, Multi layer perceptron, Multilayer neural networks, Nearest neighbor search, Support vector machines, Support vector regression, Support vector regression (SVR)}, doi = {10.1007/978-3-030-58811-3_5}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85092259110\&doi=10.1007\%2f978-3-030-58811-3_5\&partnerID=40\&md5=fd5730e8014d40306df07238106d32ff}, author = {Chlioui, I. and Abnane, I. and Idri, A.} } @article {Bachiri202036, title = {A Complete Prenatal Solution for a Reproductive Health Unit in Morocco}, journal = {Advances in Intelligent Systems and Computing}, volume = {1161 AISC}, year = {2020}, note = {cited By 0}, pages = {36-43}, abstract = {A prenatal mobile Personal Health Records (mPHR), along with an Electronic Health Records (EHR) are, respectively, exploited in order to permit both the pregnant women and gynecologists or obstetricians monitor the pregnancy progress in the best conditions. For this intent, a complete solution consisting of a prenatal mPHR and an EHR were developed for the maternity {\textquotedblleft}Les Orangers{\textquotedblright} of the Avicenne University Hospital in Rabat. The complete solution provides the main functionalities of a prenatal service. Thereafter, the solution will be validated by conducting an experiment for quality and potential assessment. Hence, a recruitment process has been determined to identify the eligibility criteria to enroll participants (pregnant women and gynecologists), in addition to planning the course of the experiment. {\textcopyright} 2020, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, keywords = {Complete solutions, electronic health record, Eligibility criterion, Health, Information systems, Information use, Personal health record, Pregnant woman, Recruitment process, reproductive health}, doi = {10.1007/978-3-030-45697-9_4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085481969\&doi=10.1007\%2f978-3-030-45697-9_4\&partnerID=40\&md5=e3d4adffd953495adbfb14b0d483c067}, author = {Bachiri, M. and Idri, A. and Rachad, T. and Alami, H. and Redman, L.M.} } @conference {Remli2020249, title = {Computer integrated manufacturing architecture: A literature review}, booktitle = {IC3K 2020 - Proceedings of the 12th International Joint Conference on Knowledge Discovery, Knowledge Engineering and Knowledge Management}, volume = {3}, year = {2020}, note = {cited By 0}, pages = {249-256}, abstract = {The exponential technological revolution has had a positive impact on industrial companies, providing them with plenty of opportunities to improve their production flows and optimize their costs. 
This revolution has led to contemporary computer integrated manufacturing (CIM) that consists of linking the shop floor systems to the high business layer. In order to do that, there has been some research to define a reference architecture to cover all the use cases. This paper presents a literature review of CIM architectures. The purpose of this review is to enumerate the different aspects covered by the different architectures in the literature and the approaches proposed to handle them. Copyright {\textcopyright} 2020 by SCITEPRESS - Science and Technology Publications, Lda. All rights reserved.}, keywords = {Computer architecture, Computer integrated manufacturing, Industrial companies, Knowledge management, Literature reviews, Production flows, Reference architecture, Search engines, Shop floor, Technological revolution}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85107133186\&partnerID=40\&md5=5b2fbe31955f56e4cfd019808278476c}, author = {Remli, A. and Khtira, A. and Asri, B.E.} } @article {Chlioui2020547, title = {Data preprocessing in knowledge discovery in breast cancer: systematic mapping study}, journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization}, volume = {8}, number = {5}, year = {2020}, note = {cited By 5}, pages = {547-561}, abstract = {Data Mining (DM) is a set of techniques that allow analysing data from different perspectives and summarising it into useful information. Data mining has been increasingly used in medicine, especially in oncology. Data preprocessing is the most important step of the knowledge extraction process and allows improving the performance of the DM models. Breast cancer (BC) has become the most common cancer among females worldwide and the leading cause of women{\textquoteright}s death. This paper aims to perform a systematic mapping study to analyse and synthesise studies on the application of preprocessing techniques for a DM task in breast cancer. Therefore, 66 relevant articles published between 2000 and October 2018 were selected and analysed according to five criteria: year/channel of publication, research type, medical task, empirical type and preprocessing task. The results show that conferences and journals are the most targeted publication sources, researchers were more interested in applying preprocessing techniques for the diagnosis of BC, historical-based evaluation was the most used empirical type in the evaluation of preprocessing techniques in BC, and data reduction was the most investigated task of preprocessing in BC. However, a low number of papers discussed treatment, which encourages researchers to devote more effort to this task.
{\textcopyright} 2020 Informa UK Limited, trading as Taylor \& Francis Group.}, keywords = {algorithm, Article, Breast Cancer, cancer classification, cancer prognosis, clinical assessment, clinical outcome, Data mining, Data mining models, Data mining tasks, Data preprocessing, Diagnosis, diagnostic accuracy, Diseases, Extraction process, health promotion, human, image analysis, knowledge, knowledge discovery, Knowledge extraction, Machine learning, Mapping, Medical informatics, nerve cell network, neural crest cell, Performance, Pre-processing techniques, processing, screening test, Systematic mapping studies, Systematic Review, validity}, doi = {10.1080/21681163.2020.1730974}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85080112312\&doi=10.1080\%2f21681163.2020.1730974\&partnerID=40\&md5=befb1bc3f31f676a8e95bbc5bff5ab6d}, author = {Chlioui, I. and Idri, A. and Abnane, I.} } @article {Illi202013996, title = {On the Distribution of the Sum of M{\'a}laga-M Random Variables and Applications}, journal = {IEEE Transactions on Vehicular Technology}, volume = {69}, number = {11}, year = {2020}, note = {cited By 3}, pages = {13996-14000}, abstract = {In this paper, a very accurate approximation method for the statistics of the sum of M{\'a}laga-M random variates with pointing error (MRVs) is proposed. In particular, the probability density function of MRV is approximated by a Fox{\textquoteright}s H-function through the moment-based approach. Then, the respective moment-generating function of the sum of N MRVs is provided, based on which the average symbol error rate is evaluated for an N-branch maximal-ratio combining (MRC) receiver. The retrieved results show that the proposed approximate results match accurately with the exact simulated ones. Additionally, the results show that the achievable diversity order increases as a function of the number of MRC diversity branches. {\textcopyright} 1967-2012 IEEE.}, keywords = {Approximate results, Approximation methods, Average symbol error rate (SER), Diversity order, Error statistics, Maximal ratio combining (MRC) receivers, Moment generating function, Pointing errors, Probability density function, Random variates}, doi = {10.1109/TVT.2020.3025405}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85096323448\&doi=10.1109\%2fTVT.2020.3025405\&partnerID=40\&md5=983059a7dbdf860db83a81d1454d56fc}, author = {Illi, E. and Bouanani, F.E. and Ayoub, F.} } @article {Mrhar202072, title = {A dropout predictor system in moocs based on neural networks}, journal = {Journal of Automation, Mobile Robotics and Intelligent Systems}, volume = {14}, number = {4}, year = {2020}, note = {cited By 2}, pages = {72-80}, abstract = {Massive open online courses, MOOCs, are a recent phenomenon that has achieved a tremendous media attention in the online education world. Certainly, the MOOCs have brought interest among the learners (given the number of enrolled learners in these courses). Nevertheless, the rate of dropout in MOOCs is very important. Indeed, a limited number of the enrolled learners complete their courses. The high dropout rate in MOOCs is perceived by the educator{\textquoteright}s community as one of the most important problems. It{\textquoteright}s related to diverse aspects, such as the motivation of the learners, their expectations and the lack of social interactions. 
However, to solve this problem, it is necessary to predict the likelihood of dropout in order to propose an appropriate intervention for learners at-risk of dropping out their courses. In this paper, we present a dropout predictor model based on a neural network algorithm and sentiment analysis feature that used the clickstream log and forum post data. Our model achieved an average AUC (Area under the curve) as high as 90\% and the model with the feature of the learner{\textquoteright}s sentiments analysis attained average increase in AUC of 0.5\%. {\textcopyright} 2020, Industrial Research Institute for Automation and Measurements. All rights reserved.}, doi = {10.14313/JAMRIS/4-2020/48}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85102757989\&doi=10.14313\%2fJAMRIS\%2f4-2020\%2f48\&partnerID=40\&md5=4cbaa0f7f458ce320541450af5ff8a46}, author = {Mrhar, K. and Douimi, O. and Abik, M.} } @article {Yatribi2020226, title = {An Efficient and Secure Forward Error Correcting Scheme for DNA Data Storage}, journal = {Advances in Intelligent Systems and Computing}, volume = {942}, year = {2020}, note = {cited By 0}, pages = {226-237}, abstract = {In this paper, a new efficient error correcting scheme for DNA archival digital data storage is proposed. We devise a double protection scheme for DNA oligos, aiming to ensure the protection of both information and indexing header data from both symbol flipping and erasure-burst errors, using two different cyclic ternary difference-set codes, which are known to be completely orthogonalisable and very easy to decode using a simple majority-logic decoding algorithm. We show that the proposed scheme is efficient and easily scalable, and provides a coding potential of 1.97 bit per nucleotide, and a reasonable net information density of 0.75 bit/nt under the considered experimental conditions, with relatively a lower decoding complexity and costs compared to other DNA data storage approaches. {\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {Bioinformatics, Decoding, Decoding complexity, Difference sets, Digital storage, DNA, Error correcting scheme, Errors, Experimental conditions, Forward error correcting, Gene encoding, Information density, Majority logic, Pattern recognition, Protection schemes, Simple majority, Soft computing}, doi = {10.1007/978-3-030-17065-3_23}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064931520\&doi=10.1007\%2f978-3-030-17065-3_23\&partnerID=40\&md5=215574899d2a75166c0a2fc343cb673e}, author = {Yatribi, A. and Belkasmi, M. and Ayoub, F.} } @conference {Ahajjam20201701, title = {Electric Power Quality Disturbances Classification based on Temporal-Spectral Images and Deep Convolutional Neural Networks}, booktitle = {2020 International Wireless Communications and Mobile Computing, IWCMC 2020}, year = {2020}, note = {cited By 7}, pages = {1701-1706}, abstract = {We propose a deep learning based technique for power quality disturbances (PQD) detection and identification that aims at mimicking the reasoning of human field experts. This technique consists of processing small-size images containing superimposed time and frequency representations of the electric signal. The classification of PQD is performed with a convolutional neural network (CNN) trained with synthetic signals containing various single and multiple PQDs. 
Simulation results show that our technique is able to detect and identify with a high accuracy, in addition to pure sinusoidal, eight single PQDs and 20 of their combinations (up to four PQDs in the same signal) even in the presence of noise. Features such as lower computational load and simplicity while maintaining high performance sets the proposed technique apart from previous ones. {\textcopyright} 2020 IEEE.}, keywords = {Computational loads, Convolution, Convolutional neural networks, Deep learning, Deep neural networks, Detection and identifications, Electric power quality disturbances, Electric signal, Image classification, Mobile computing, Power quality, Power quality disturbances, Spectral images, Spectroscopy, Synthetic signals, Time and frequencies}, doi = {10.1109/IWCMC48107.2020.9148438}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85089652241\&doi=10.1109\%2fIWCMC48107.2020.9148438\&partnerID=40\&md5=a479607523102d68ad4d7d0188079ac8}, author = {Ahajjam, M.A. and Licea, D.B. and Ghogho, M. and Kobbane, A.} } @article {Abnane2020, title = {Fuzzy case-based-reasoning-based imputation for incomplete data in software engineering repositories}, journal = {Journal of Software: Evolution and Process}, volume = {32}, number = {9}, year = {2020}, note = {cited By 6}, abstract = {Missing data is a serious issue in software engineering because it can lead to information loss and bias in data analysis. Several imputation techniques have been proposed to deal with both numerical and categorical missing data. However, most of those techniques used is simple reuse techniques originally designed for numerical data, which is a problem when the missing data are related to categorical attributes. This paper aims (a) to propose a new fuzzy case-based reasoning (CBR) imputation technique designed for both numerical and categorical data and (b) to evaluate and compare the performance of the proposed technique with the k-nearest neighbor (KNN) imputation technique in terms of error and accuracy under different missing data percentages and missingness mechanisms in four software engineering data sets. The results suggest that the proposed fuzzy CBR technique outperformed KNN in terms of imputation error and accuracy regardless of the missing data percentage, missingness mechanism, and data set used. Moreover, we found that the missingness mechanism has an important impact on the performance of both techniques. The results are encouraging in the sense that using an imputation technique designed for both categorical and numerical data is better than reusing methods originally designed for numerical data. {\textcopyright} 2020 John Wiley \& Sons, Ltd.}, keywords = {accuracy, Case based reasoning, Categorical data, Clustering algorithms, Empirical Software Engineering, Fuzzy analogy, imputation, Missing data, Nearest neighbor search, Numerical methods, Software engineering}, doi = {10.1002/smr.2260}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85081904769\&doi=10.1002\%2fsmr.2260\&partnerID=40\&md5=762cb4270c6a55d209feaa8eb6df5c5f}, author = {Abnane, I. and Idri, A. 
and Abran, A.} } @article {Sardi2020931, title = {Gamified e-Health Solution to Promote Postnatal Care in Morocco}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {12252 LNCS}, year = {2020}, note = {cited By 0}, pages = {931-946}, abstract = {The postnatal period is a critical phase in both the lives of the mothers and the newborns. Due to all the inherent changes that occur during this period, quality care is crucial during this period to enhance the wellbeing of the mothers and the newborns. In Morocco, the neglection of postnatal care services are often associated to poor communication, financial difficulties and cultural barriers. Mobile technology constitutes therefore a promising approach to bridge this gap and promote postnatal care. In order to improve the effectiveness of mobile technology, gamification has become a powerful feature to boost motivation and induce fun and interactivity into the mobile solutions{\textquoteright} tasks. Based on a previous review on mobile applications for postnatal care available in app repositories, a set of requirements have been identified to build a comprehensive mobile solution that cater the needs of both the mothers and the newborns. These requirements have, then, been enriched with real needs elicited at maternity Les orangers that belongs to the University Hospital Avicenne of Rabat. Along with the functional and non-functional requirements, gamification aspects have been also analyzed. After the analysis and design phases, a pilot version of the solution called {\textquoteleft}Mamma\&Baby{\textquoteright} has been implemented using android framework. {\textquoteleft}Mamma\&Baby{\textquoteright} is a mobile solution dedicated to assist new mothers during their postnatal period. As future work, it is expected to fully integrate the gamification elements into the solution and conduct an empirical evaluation of the overall quality and the potential of the solution with real puerperal women. {\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {E-health solutions, Empirical evaluations, Financial difficulties, Gamification, Mobile applications, Mobile solutions, Mobile Technology, Non-functional requirements, Overall quality, Quality control, Telecommunication equipment}, doi = {10.1007/978-3-030-58811-3_66}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85092252943\&doi=10.1007\%2f978-3-030-58811-3_66\&partnerID=40\&md5=2a3490959b88f98611929a0ad0efd78b}, author = {Sardi, L. and Idri, A. and Rachad, T. and Redman, L. and Alami, H.} } @article {Yatribi2020, title = {Gradient-descent decoding of one-step majority-logic decodable codes}, journal = {Physical Communication}, volume = {39}, year = {2020}, note = {cited By 1}, abstract = {In this paper, a new low-complexity gradient-descent based iterative majority-logic decoder (GD-MLGD) is proposed for decoding One-Step Majority-Logic Decodable (OSMLD) codes. We give a formulation of the decoding problem of binary OSMLD codes, as a maximization problem of a derivable objective function. The optimization problem is solved using a pseudo gradient-descent algorithm, which performs iteratively an update towards the optimal estimated codeword been transmitted, based on the first-order partial derivatives of each variable calculated in the previous iteration. The proposed decoding scheme achieves a fast convergence to an optimum codeword compared to other decoding techniques reviewed in this paper, at the cost of lower computational complexity. The quantized version (QGD-MLGD) is also proposed in order to further reduce the computational complexity. Simulation results show that the proposed decoding algorithms outperform all the existing majority-logic decoding schemes, and also various gradient-descent based bit-flipping algorithms, and performs nearly close to the belief propagation sum{\textendash}product (BP-SP) decoding algorithm of LDPC codes, especially for high code lengths, providing an efficient trade-off between performance and decoding complexity. Moreover, the proposed quantized algorithm has shown to perform better than all the existing decoding techniques. The proposed decoding algorithms have shown to be suitable for ultra reliable, low latency and energy-constrained communication systems where both high performances and low-complexity are required.
{\textcopyright} 2020 Elsevier B.V.}, keywords = {AWGN channel, Backpropagation, Computational complexity, Computer circuits, Decoding complexity, Difference sets, Economic and social effects, Electronic trading, Gradient descent, Gradient methods, Iterative decoding, LDPC codes, Majority logic, Majority logic decoding, Maximum likelihood, Maximum likelihood decoding, Optimization, OSMLD codes}, doi = {10.1016/j.phycom.2019.100999}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85077749482\&doi=10.1016\%2fj.phycom.2019.100999\&partnerID=40\&md5=830156cfb12a8ec3dbff2e002b3ade63}, author = {Yatribi, A. and Belkasmi, M. and Ayoub, F.} } @conference {Bouabdellah2020, title = {Hybrid Very High Throughput Satellites: Potential, Challenges, and Research Directions}, booktitle = {2020 8th International Conference on Communications and Networking, ComNet2020 - Proceedings}, year = {2020}, note = {cited By 4}, abstract = {Hybrid terrestrial-satellite transmission systems have been among hottest spots in the wireless communication industry due to the high demand for global high data rates. In such systems, free-space optical (FSO) technology is advocated for assessing the feeder links (ground-satellite) in order to fulfill high data rates requirements and provide a larger coverage in terms of the connected number of users. This paper gives an overview of various components of very-high throughput hybrid terrestrial-satellite communication system with an FSO feeder link, and Ka-band multibeam RF channels for the satellite ground link. In this survey paper, we depict the main communication challenges in the FSO side such as pointing error, turbulence, cloud blockage, and also the multibeam RF side such as spectrum scarcity, inter-beam interference, and security. Based on these challenges we point out some potential research directions. {\textcopyright} 2020 IEEE.}, keywords = {Free-space optical, High data rate, High throughput, Inter-beam interference, Pointing errors, Potential researches, Power control, Satellite communication systems, Satellite links, Satellite transmission, Satellites, Wireless communications}, doi = {10.1109/ComNet47917.2020.9306099}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85100003768\&doi=10.1109\%2fComNet47917.2020.9306099\&partnerID=40\&md5=ab22ef8fab218cbdde68b99793a394e3}, author = {Bouabdellah, M. and Illi, E. and Bouanani, F.E. and Alouini, M.-S.} } @article {Ahajjam2020, title = {IMPEC: An integrated system for monitoring and processing electricity consumption in buildings}, journal = {Sensors (Switzerland)}, volume = {20}, number = {4}, year = {2020}, note = {cited By 4}, abstract = {Non-intrusive Load Monitoring (NILM) systems aim at identifying and monitoring the power consumption of individual appliances using the aggregate electricity consumption. Many issues hinder their development. For example, due to the complexity of data acquisition and labeling, datasets are scarce; labeled datasets are essential for developing disaggregation and load prediction algorithms. In this paper, we introduce a new NILM system, called Integrated Monitoring and Processing Electricity Consumption (IMPEC). The main characteristics of the proposed system are flexibility, compactness, modularity, and advanced on-board processing capabilities. Both hardware and software parts of the system are described, along with several validation tests performed at residential and industrial settings. {\textcopyright} 2020 by the authors. 
Licensee MDPI, Basel, Switzerland.}, keywords = {Article, Data acquisition, Disaggregation, Electric load management, Electric power utilization, electricity, Electricity-consumption, Energy monitoring, Hardware and software, Industrial settings, Integrated monitoring, Labeled data, Monitoring, Nonintrusive load monitoring, On-board processing, software, Software testing}, doi = {10.3390/s20041048}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079573681\&doi=10.3390\%2fs20041048\&partnerID=40\&md5=a145afa634dd9bf882c3c570823158c9}, author = {Ahajjam, M.A. and Licea, D.B. and Ghogho, M. and Kobbane, A.} } @article {Elhassouni202093, title = {The implementation of credit risk scorecard using ontology design patterns and BCBS 239}, journal = {Cybernetics and Information Technologies}, volume = {20}, number = {2}, year = {2020}, note = {cited By 4}, pages = {93-104}, abstract = {Nowadays information and communication technologies are playing a decisive role in helping the financial institutions to deal with the management of credit risk. There have been significant advances in scorecard model for credit risk management. Practitioners and policy makers have invested in implementing and exploring a variety of new models individually. Coordinating and sharing information groups, however, achieved less progress. One of several causes of the 2008 financial crisis was in data architecture and information technology infrastructure. To remedy this problem the Basel Committee on Banking Supervision (BCBS) outlined a set of principles called BCBS 239. Using Ontology Design Patterns (ODPs) and BCBS 239, credit risk scorecard and applicant ontologies are proposed to improve the decision making process in credit loan. Both ontologies were validated, distributed in Ontology Web Language (OWL) files and checked in the test cases using SPARQL. Thus, making their (re)usability and expandability easier in financial institutions. These ontologies will also make sharing data more effective and less costly. {\textcopyright} 2020 Sciendo. All rights reserved.}, doi = {10.2478/cait-2020-0019}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85091363101\&doi=10.2478\%2fcait-2020-0019\&partnerID=40\&md5=abed4b96951571c6e4dccfb2dbd8e8e2}, author = {Elhassouni, J. and El Qadi, A. and El Madani El Alami, Y. and El Haziti, M.} } @article {Assila2020, title = {Improving caching resource management: A pricing economic approach using Cournot, Bertrand, and Stackelberg game models}, journal = {International Journal of Communication Systems}, year = {2020}, note = {cited By 3}, abstract = {Over-The-Top broadcasts a huge number of medias that mobile network operators have to manage efficiently before to deliver it to their subscribers. We propose an economic pricing approach to address caching resource management issues in the 5G wireless networks and to overcome limitations in terms of throughput, latency, and reliability. Moreover, we consider this approach based on an oligopolistic multi-market deducted from Cournot, Stackelberg, and Bertrand models. For simulation purpose, we consider the routing protocol (Ad-hoc On-Demand Distance Vector [AODV]) commonly used for the wireless network. We use the NS-2 package, and we analyze results in terms of End-to-End delay representing latency, throughput, packet delivery ratio, and normalized network load. 
{\textcopyright} 2020 John Wiley \& Sons, Ltd.}, keywords = {5G mobile communication systems, Ad hoc on demand distance vector, caching, Costs, Cournot, Economics, Mobile network operators, Mobile telecommunication systems, Natural resources management, Packet delivery ratio, Resource allocation, Resource Management, Stackelberg, Stackelberg Games, wireless networks}, doi = {10.1002/dac.4358}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079387484\&doi=10.1002\%2fdac.4358\&partnerID=40\&md5=70ae8f696b34a21860572f8c3437dd46}, author = {Assila, B. and Kobbane, A. and Ben-Othman, J.} } @article {ElFazziki2020136, title = {Improving Collaborative Filtering Approach by Leveraging Opposite Users}, journal = {Advances in Intelligent Systems and Computing}, volume = {1102 AISC}, year = {2020}, note = {cited By 0}, pages = {136-145}, abstract = {Collaborative filtering is a widely used recommendation approach that aims to predict for a target user the most appropriate items. This approach uses the ratings given by users who share similar tastes and preferences to predict ratings for items that haven{\textquoteright}t been rated yet. Despite its simplicity and justifiability, the CF approach still suffers from several drawbacks and problems, including sparsity, gray sheep and scalability. These problems affect the accuracy of the obtained results. In this work, we present a novel collaborative filtering approach based on the opposite preferences of users. We focus on enhancing the accuracy of predictions and dealing with the gray sheep problem by inferring new similar neighbors based on users who have dissimilar tastes and preferences. For instance, if a user X is dissimilar to a user Y then the user +X is similar to the user Y. The experimental results performed on two datasets, MovieLens and FilmTrust, show that our approach outperforms several baseline recommendation techniques. {\textcopyright} Springer Nature Switzerland AG 2020.}, keywords = {Collaborative filtering, Forecasting, Gray sheep, Intelligent systems, Learning systems, Movielens, Opposite neighbors, planning, Recommendation techniques, Recommender Systems, Similar neighbors, Similarity measure, Sustainable development}, doi = {10.1007/978-3-030-36653-7_14}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078457465\&doi=10.1007\%2f978-3-030-36653-7_14\&partnerID=40\&md5=fdeb5a86480517f6637d45ebc49b0849}, author = {El Fazziki, A. and El Madani El Alami, Y. and El Aissaoui, O. and El Allioui, Y. and Benbrahim, M.} } @article {AlAfandy2020652, title = {Investment of classic deep CNNs and SVM for classifying remote sensing images}, journal = {Advances in Science, Technology and Engineering Systems}, volume = {5}, number = {5}, year = {2020}, note = {cited By 2}, pages = {652-659}, abstract = {Feature extraction is an important process in image classification for achieving an efficient accuracy for the classification learning models. One of these methods is using the convolution neural networks. The use of the trained classic deep convolution neural networks as features extraction gives considerable results in the remote sensing images classification models. So, this paper proposes three classification approaches using the support vector machine, based on the use of the ImageNet pre-trained weights classic deep convolution neural networks as features extraction from the remote sensing images.
There are three convolution models that used in this paper; the Densenet 169, the VGG 16, and the ResNet 50 models. A comparative study is done by extract features using the outputs of the mentioned ImageNet pre-trained weights convolution models after transfer learning, and then use these extracted features as input features for the support vector machine classifier. The used datasets in this paper are the UC Merced land use dataset and the SIRI-WHU dataset. The comparison is based on calculating the overall accuracy to assess the classification model performance. {\textcopyright} 2020 ASTES Publishers. All rights reserved.}, doi = {10.25046/AJ050580}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85095822116\&doi=10.25046\%2fAJ050580\&partnerID=40\&md5=45b769e14c4c2aabfb41baf3e92d7ae3}, author = {AlAfandy, K.A. and Omara, H. and Lazaar, M. and Achhab, M.A.} } @conference {Fagroud2020572, title = {IoT search engines: Exploratory data analysis}, booktitle = {Procedia Computer Science}, volume = {175}, year = {2020}, note = {cited By 4}, pages = {572-577}, abstract = {IOT search engine (IOTSE) is a search tool proposed with the aim to allow research, list and identification of IOT devices (All devices connected to the internet represent an IOT device). This tool provides a huge amount of data, which is justified by the exponential increase of the number of objects connected in the world. The data provided must be analyzed and interpreted in the aim to give a better understanding, quickly description and help to make decisions. In this work we applied exploratory data analysis on IOT search engine data in order to give an analysis of IOTSE data with visual method. {\textcopyright} 2020 The Authors.}, keywords = {Data handling, Exploratory data analysis, Exponential increase, Information analysis, Internet of things, Search engines, Search tools, Ubiquitous computing}, doi = {10.1016/j.procs.2020.07.082}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85094620165\&doi=10.1016\%2fj.procs.2020.07.082\&partnerID=40\&md5=d36b954ecd9cc39a0e670b7bcb86a85b}, author = {Fagroud, F.Z. and Ajallouda, L. and Lahmar, E.H.B. and Toumi, H. and Achtaich, K. and El Filali, S.} } @article {Azougaghe2020103, title = {Iterative Decoding of GSCB Codes Based on RS Codes Using Adapted Scaling Factors}, journal = {Communications in Computer and Information Science}, volume = {1264}, year = {2020}, note = {cited By 0}, pages = {103-114}, abstract = {In this work, we have extended two algorithms to decode generalized serially concatenated block codes based on RS codes (GSCB-RS). The first is the modified Chase-Pyndiah algorithm (MCPA) proposed by Farchane and Belkasmi[1]. The second is the Chase-Pyndiah algorithm (CPA) that is developed initially for decoding turbo product codes[2]. We also investigated the effect of different parameters, namely component codes, the size and structure of the interleaver and the number of iterations, using computer simulations. The simulations result shows that the performance of the GSCB-RS codes using the MCPA decoder out performs the CPA decoder that uses predetermined weighting factor (α) and reliability factor (β) parameters. {\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {Block codes, Component codes, Concatenated codes, Data communication systems, Interleavers, Iterative decoding, Number of iterations, Reliability factor, RS codes, Scaling factors, Security of data, Turbo product codes, Weighting factors}, doi = {10.1007/978-3-030-61143-9_9}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097364331\&doi=10.1007\%2f978-3-030-61143-9_9\&partnerID=40\&md5=5a3be47852101aec7c21c1ee8c52200c}, author = {Azougaghe, E.-S. and Farchane, A. and Safi, S. and Belkasmi, M.} } @conference {Ajallouda2020169, title = {K-means hac and fcm which clustering approach for arabic text?}, booktitle = {ACM International Conference Proceeding Series}, year = {2020}, note = {cited By 0}, pages = {169-174}, abstract = {Today we are witnessing rapid growth in Web resources that allow Internet users to express and share their ideas opinions and judgments on a variety of issues. Several classification approaches have been proposed to classify textual data. But all these approaches require us to label the clusters we want to obtain. Which in reality is not available because we do not know in advance the information that can be proposed through these opinions. To overcome this constraint clustering approaches such as K-mean HAC or FCM can be exploited. In this paper we present and compare these approaches. And to show the importance of exploiting clustering algorithms to classify and analyze textual data in Arabic. By applying them to a real case that has created a great debate in Morocco which is the case of teachers contracting with academies.
{\textcopyright} 2020 ACM.}, keywords = {Arabic texts, Classification (of information), Classification approach, Clustering approach, Intelligent systems, Internet users, K-means clustering, Rapid growth, Real case, Textual data, Web resources}, doi = {10.1145/3419604.3419779}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85096415898\&doi=10.1145\%2f3419604.3419779\&partnerID=40\&md5=8aca383d8f485d15b3da191f03c5bf62}, author = {Ajallouda, L. and Fagroud, F.Z. and Zellou, A. and Benlahmar, E.H.} } @article {Assami2020390, title = {Learning actor ontology for a personalised recommendation in massive open online courses}, journal = {International Journal of Technology Enhanced Learning}, volume = {12}, number = {4}, year = {2020}, note = {cited By 4}, pages = {390-410}, abstract = {Massive Open Online Courses revolutionised online learning and instigated research on information and communication technologies for learning to enhance the learner experience and increase his engagement level. In earlier research, we identified the recommendation criteria that could be used to recommend suitable MOOCs for the learner{\textquoteright}s needs and motivations. Thus, criteria like the level of knowledge, competences and the pace of learning introduced by MOOCs and preferred by learners will be matched by a personalised recommender system. In this paper, we model the Learning Actor ontology to be used for this matching process. It is a domain ontology that was developed by following the phases of the on-to-knowledge methodology: feasibility study, kickoff phase, refinement phase, evaluation and validation phase and, finally, the maintenance phase. At last, we obtained the learning ontology that describes the {\textquotedblleft}learning actor{\textquotedblright} major class by using a complex definition of characteristics and their relationship and range of values. Copyright {\textcopyright} 2020 Inderscience Enterprises Ltd.}, doi = {10.1504/IJTEL.2020.110048}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85092666488\&doi=10.1504\%2fIJTEL.2020.110048\&partnerID=40\&md5=100979a3e3eab5e76ce2172142db2700}, author = {Assami, S. and Daoudi, N. and Ajhoun, R.} } @article {Zerouaoui202044, title = {Machine Learning and Image Processing for Breast Cancer: A Systematic Map}, journal = {Advances in Intelligent Systems and Computing}, volume = {1161 AISC}, year = {2020}, note = {cited By 3}, pages = {44-53}, abstract = {Machine Learning (ML) combined with Image Processing (IP) gives a powerful tool to help physician, doctors and radiologist to make more accurate decisions. Breast cancer (BC) is a largely common disease among women worldwide; it is one of the medical sub-field that are experiencing an emergence of the use of ML and IP techniques. This paper explores the use of ML and IP techniques for BC in the form of a systematic mapping study. 530 papers published between 2000 and August 2019 were selected and analyzed according to 6 criteria: year and publication channel, empirical type, research type, medical task, machine learning objectives and datasets used. The results show that classification was the most used ML objective. As for the datasets most of the articles used private datasets belonging to hospitals, although papers using public data choose MIAS (Mammographic Image Analysis Society) which make it as the most used public dataset. 
{\textcopyright} 2020, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, keywords = {Breast Cancer, Common disease, Diseases, Information systems, Information use, Machine learning, Mammographic image analysis, mammography, Medical imaging, Public data, Public dataset, Sub fields, Systematic mapping studies, Systematic maps}, doi = {10.1007/978-3-030-45697-9_5}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085473168\&doi=10.1007\%2f978-3-030-45697-9_5\&partnerID=40\&md5=e980b38be1c8a8499c579b990b79b909}, author = {Zerouaoui, H. and Idri, A. and El Asnaoui, K.} } @article {Sanak2020203, title = {MARCO Gene Variations and Their Association with Cardiovascular Diseases Development: An In-Silico Analysis}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {12108 LNBI}, year = {2020}, note = {cited By 0}, pages = {203-212}, abstract = {Cardiovascular diseases (CVDs) represent the leading cause of morbidity and mortality in both developed and developing countries. They have complex etiology, influenced by several risk factors including the genetic component. The genetic variations were shown to be highly associated with different CVD forms, in this objective we proceeded to analyze the Macrophage Receptor with Collagen structure gene (MARCO), we performed an in-silico study with a genomic functional analysis, to evaluate the mutations{\textquoteright} effects on the proteins{\textquoteright} structures and functionalities. Indeed, we used dbSNP to retrieve single nucleotide polymorphisms (SNPs) of MARCO gene. We proceeded then to a filtration and a stability analysis using several bioinformatics tools to evaluate the most deleterious variations. Moreover we predicted the 3D structures of the encoded proteins by MARCO gene, which was validated using PROCHECK. Then we analyzed and visualize the proteins{\textquoteright} 3D structures. The extraction of the human MARCO gene SNPs revealed that dbSNP contains more than 14000 SNPs. The filtration process revealed the variations G241V and G262W to be the most deleterious SNPs, indeed, I-Mutant and DUET showed decreased protein stability. The validation using PROCHECK revealed a total of 89.9\% MARCO protein residues to be in the favored region. As conclusion, our results let suggesting that G241V and G262W variations can cause alteration in the proteins{\textquoteright} structures and functions. Hence, to improve the health management, screening precariously these variants, can be useful as model for CVD diagnosis and helpful in pharmacogenomics. {\textcopyright} Springer Nature Switzerland AG 2020.}, keywords = {Bioinformatics, Bioinformatics tools, Biomedical engineering, Cardio-vascular disease, Cardiology, Collagen structure, Developing countries, Diagnosis, Diseases, Filtration process, Genes, Genetic components, Genetic variation, Health management, Proteins, Single nucleotide polymorphisms}, doi = {10.1007/978-3-030-45385-5_19}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085196738\&doi=10.1007\%2f978-3-030-45385-5_19\&partnerID=40\&md5=1df531cdc3510e8747834a13a996b1b1}, author = {Sanak, K. and Azzouzi, M. and Abik, M. 
and Radouani, F.} } @article {ElAissaoui2020655, title = {Mining Learners{\textquoteright} Behaviors: An Approach Based on Educational Data Mining Techniques}, journal = {Advances in Intelligent Systems and Computing}, volume = {1076}, year = {2020}, note = {cited By 2}, pages = {655-670}, abstract = {Educational data mining is a research field that aims to apply data mining techniques in educational environments. Many data mining techniques such as clustering, classification, and prediction can be performed on educational data in order to analyze the learner behaviors. In this work, we have used the clustering and classification techniques to predict the learners{\textquoteright} learning styles. The students{\textquoteright} behaviors while using the e-learning system have been captured from the log file and given as an input of a clustering algorithm to group them into 16 clusters. The resulted clusters were labeled with learning styles combinations based on the Felder and Silverman learning style model. Then the labeled behaviors were given as input to four classifiers: naive Bayes, Cart, Id3, and C4.5 to compare their performance in predicting students{\textquoteright} learning styles. The four classifiers were performed using Weka data mining tool, and the obtained results showed that Id3 yielded better results than the other classifiers. {\textcopyright} Springer Nature Singapore Pte Ltd. 2020.}, keywords = {Artificial intelligence, Classification technique, Clustering algorithms, Data mining, Data-mining tools, Educational data mining, Educational environment, Embedded systems, Forecasting, Learning Style, Learning systems, Log file, Naive bayes, Research fields, Students}, doi = {10.1007/978-981-15-0947-6_62}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085173246\&doi=10.1007\%2f978-981-15-0947-6_62\&partnerID=40\&md5=0cb9665b2720bd9b06e011902e8708b5}, author = {El Aissaoui, O. and El Alami El Madani, Y. and Oughdir, L. and Dakkak, A. and El Allioui, Y.} } @article {Sardi2020, title = {Mobile health applications for postnatal care: Review and analysis of functionalities and technical features}, journal = {Computer Methods and Programs in Biomedicine}, volume = {184}, year = {2020}, note = {cited By 19}, abstract = {Background: Providing a continuum of care from antenatal, childbirth and postnatal period results in reduced maternal and neonatal morbidity and mortality. Timely, high quality postnatal care is crucial for maximizing maternal and newborn health. In this vein, the use of postnatal mobile applications constitutes a promising strategy. Methods: A Systematic Literature Review (SLR) protocol was adopted to perform the selection, data extraction and functional evaluation of the available postnatal apps on iOS and Android platforms. The analysis of the functionalities and technical features of the apps selected was performed according to a 37-items assessment questionnaire developed on the basis of the scientific literature of postnatal care and a preliminary analysis of available postnatal apps Results: A total of 48 postnatal apps were retrieved from the app repositories of the iOS and Android platforms. The results of the functional content analysis show that the postnatal apps selected relatively achieved low scores owing to the complexity and the ramification of the postnatal care. Conclusions: The present study helps in identifying areas related to the postnatal care that require further endeavors to be properly addressed. 
It also provides directions for developers to leverage the advancement and innovation on mobile technology to build complete and well-suited postnatal apps. {\textcopyright} 2019}, keywords = {Android (operating system), Application programs, Article, childbirth, content analysis, data extraction, evaluation study, female, Functional evaluation, Functionality, human, Humans, Infant, iOS (operating system), mHealth, mobile application, Mobile applications, Mobile health application, newborn, newborn morbidity, perinatal period, Postnatal care, Postpartum Period, pregnancy, Preliminary analysis, procedures, puerperium, questionnaire, Scientific literature, Systematic literature review (SLR), Systematic Review, telemedicine}, doi = {10.1016/j.cmpb.2019.105114}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85073719026\&doi=10.1016\%2fj.cmpb.2019.105114\&partnerID=40\&md5=df137ca63d506f88c53e16c63f3e0c65}, author = {Sardi, L. and Idri, A. and Redman, L.M. and Alami, H. and Bezad, R. and Fernandez-Aleman, J.L.} } @conference {ElAkrouchi2020, title = {Monitoring Early Warning Signs Evolution through Time}, booktitle = {ACM International Conference Proceeding Series}, year = {2020}, note = {cited By 0}, abstract = {In excessive business competition, detecting weak signals is very important to anticipate future changes and events.
The process of detecting weak signals is very challenging, and many techniques were proposed to automatize this challenge but still needs the intervention of experts{\textquoteright} opinion. Understanding those detected signals and their evolution in time is crucial to reveal the alertness of possible future events and warnings. For this reason, this paper proposes a new algorithm to strengthen weak signals into early warning signs. The proposed algorithm aims to monitor and track weak signals{\textquoteright} evolution within time. The output will be a list of early warning signs and visualization to illustrate their evolution in time. Finally, to adequately understand the early warning signs obtained and enhance their semantic alertness, we used Word2Vec modeling to provide semantically similar words to these warning signs and improve their contextual alertness. We tested this algorithm on a web news dataset of 2006-2007 to detect early warning signs related to the 2008 financial crisis ahead of time. We obtained prominent results in strengthening and monitoring the evolution of early warning signs related to this crisis. {\textcopyright} 2020 ACM.}, keywords = {Artificial intelligence, Business competition, Competition, Early warning signs, Financial crisis, Possible futures, Semantics, Signal detection, Warning signs, Weak signals}, doi = {10.1145/3446132.3446173}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85102970850\&doi=10.1145\%2f3446132.3446173\&partnerID=40\&md5=4f35fa04a194332457fca88ff3bec0d1}, author = {El Akrouchi, M. and Benbrahim, H. and Kassou, I.} } @article {Ahajjam2020, title = {Mored: A moroccan buildings{\textquoteright} electricity consumption dataset}, journal = {Energies}, volume = {13}, number = {24}, year = {2020}, note = {cited By 6}, abstract = {This paper consists of two parts: An overview of existing open datasets of electricity consumption and a description of the Moroccan Buildings{\textquoteright} Electricity Consumption Dataset, a first of its kind, coined as MORED. The new dataset comprises electricity consumption data of various Moroccan premises. Unlike existing datasets, MORED provides three main data components: Whole premises (WP) electricity consumption, individual load (IL) ground-truth consumption, and fully labeled IL signatures, from affluent and disadvantaged neighborhoods. The WP consumption data were acquired at low rates (1/5 or 1/10 samples/s) from 12 households; the IL ground-truth data were acquired at similar rates from five households for extended durations; and IL signature data were acquired at high and low rates (50 k and 4 samples/s) from 37 different residential and industrial loads. In addition, the dataset encompasses non-intrusive load monitoring (NILM) metadata. {\textcopyright} 2020 by the authors.}, keywords = {Data components, Electric load management, Electric power utilization, Electricity-consumption, Ground truth, Ground truth data, Industrial loads, Low rates, Nonintrusive load monitoring, Signature data}, doi = {10.3390/en13246737}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85106664615\&doi=10.3390\%2fen13246737\&partnerID=40\&md5=2d7b881b6d24a1d21b672b26fa9ee62f}, author = {Ahajjam, M.A. and Licea, D.B. and Essayeh, C. and Ghogho, M. 
and Kobbane, A.} } @article {Filali2020197017, title = {Multi-access edge computing: A survey}, journal = {IEEE Access}, volume = {8}, year = {2020}, note = {cited By 49}, pages = {197017-197046}, abstract = {Multi-access Edge Computing (MEC) is a key solution that enables operators to open their networks to new services and IT ecosystems to leverage edge-cloud benefits in their networks and systems. Located in close proximity to the end users and connected devices, MEC provides extremely low latency and high bandwidth while always enabling applications to leverage cloud capabilities as necessary. In this article, we illustrate the integration of MEC into a current mobile networks{\textquoteright} architecture as well as the transition mechanisms to migrate into a standard 5G network architecture. We also discuss SDN, NFV, SFC and network slicing as MEC enablers. Then, we provide a state-of-the-art study on the different approaches that optimize the MEC resources and its QoS parameters. In this regard, we classify these approaches based on the optimized resources and QoS parameters (i.e., processing, storage, memory, bandwidth, energy and latency). Finally, we propose an architectural framework for a MEC-NFV environment based on the standard SDN architecture. {\textcopyright} 2020 Institute of Electrical and Electronics Engineers Inc. All rights reserved.}, keywords = {5G mobile communication systems, Architectural frameworks, Bandwidth, Close proximity, Edge computing, Network function virtualization, Network slicing, Networks and systems, Optimized resources, QoS parameters, State of the art, Transition mechanism}, doi = {10.1109/ACCESS.2020.3034136}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85099901543\&doi=10.1109\%2fACCESS.2020.3034136\&partnerID=40\&md5=bc99db081ebbbf5eb6d188c020bb33a2}, author = {Filali, A. and Abouaomar, A. and Cherkaoui, S. and Kobbane, A. and Guizani, M.} } @article {Bourekkache2020255, title = {Multi-agent approach for collaborative authoring and indexing of pedagogical materials}, journal = {International Journal of Continuing Engineering Education and Life-Long Learning}, volume = {30}, number = {3}, year = {2020}, note = {cited By 1}, pages = {255-275}, abstract = {In an e-learning environment, the learner may feel isolated and disoriented because of the absence of the teacher and the huge number of materials. Moreover, the pedagogical documents have several characteristics, so we must offer the appropriate documents for each learner according to his level, characteristics, preferences, etc. Consequently, the adaptation of the learning content is an important technique. Creating materials without additional information makes the delivery of relevant material an impossible task. Consequently, one has to pay attention to the stage of creating learning content using new techniques. In addition, it may not be convenient if we do not have additional information about the learner and the learning material (learning objective, prerequisites, learner background, etc.). Therefore, we develop a multi-agent system that supports a set of authors who create and index educational materials. The indexes are used by the machine to manipulate the learning content efficiently when choosing the appropriate content to satisfy the needs of heterogeneous learners.
Copyright {\textcopyright} 2020 Inderscience Enterprises Ltd.}, keywords = {Collaborative authoring, Computer aided instruction, E-learning environment, Educational materials, Learning contents, Learning materials, Learning objectives, Multi agent systems, Multi-agent approach}, doi = {10.1504/IJCEELL.2020.108527}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85093856724\&doi=10.1504\%2fIJCEELL.2020.108527\&partnerID=40\&md5=5cd4fb75d55b6654a672036b17252936}, author = {Bourekkache, S. and Kazar, O. and Abik, M.} } @conference {Fellir2020377, title = {A multi-Agent based model for task scheduling in cloud-fog computing platform}, booktitle = {2020 IEEE International Conference on Informatics, IoT, and Enabling Technologies, ICIoT 2020}, year = {2020}, note = {cited By 9}, pages = {377-382}, abstract = {The Internet-of-Things (IoT) is a network of physical objects that communicate and exchange data between them over the Internet. These objects, or things, generate a massive amount of data that require powerful analytics approaches and data storage platforms - this is where cloud computing may well come into play. The classic cloud computing paradigm, however, faces several serious issues, due to centralization, such as low performance, latency issues, security issues, bandwidth obstacle, etc.. The Fog (Edge) computing paradigm has been proposed to improve the performance and to address other weaknesses, by providing IoT data processing and storage capabilities locally at the edges (IoT devices), instead of sending all the data to the centralized cloud. Whether in cloud or in fog computing, with billions of devices and a huge number of simultaneous requests, managing limited resources becomes essential. One of the important steps in resource management is task scheduling. In this paper, we consider task scheduling in a cloud-fog computing platform, we propose a multi-agent based model that aims at serving the most important task first, taking into consideration the task priority, its wait time, its status and the resources required to complete it successfully. Furthermore, in the scheduling process, we first propose an update to the priority value of the task, while taking into consideration its dependencies to other tasks and their priorities. Simulation results show that our proposed model can lead to better resource utilization and improve performance. {\textcopyright} 2020 IEEE.}, keywords = {Autonomous agents, Computational methods, Computing paradigm, Computing platform, Data handling, Digital storage, Fog, Fog computing, Improve performance, Internet of thing (IOT), Internet of things, Multi agent systems, Multi-agent based modeling, Multitasking, Resource Management, Resource utilizations, Scheduling, Scheduling algorithms, Simulation platform, Storage capability}, doi = {10.1109/ICIoT48696.2020.9089625}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085496418\&doi=10.1109\%2fICIoT48696.2020.9089625\&partnerID=40\&md5=4ee0e2ab76422fc76603ec3b6f6942cb}, author = {Fellir, F. and El Attar, A. and Nafil, K. and Chung, L.} } @article {ElAissaoui20209, title = {A Multiple Linear Regression-Based Approach to Predict Student Performance}, journal = {Advances in Intelligent Systems and Computing}, volume = {1102 AISC}, year = {2020}, note = {cited By 15}, pages = {9-23}, abstract = {Predicting students{\textquoteright} academic outcome is useful for any educational institution that aims to ameliorate students{\textquoteright} performance. 
Based on the resulting predictions, educators can provide support to students at risk of failure. Data mining and machine learning techniques have been widely used to predict students{\textquoteright} performance; this process is called educational data mining. In this work, we have proposed a methodology to build a student performance prediction model using a supervised machine learning technique, multiple linear regression (MLR). Our methodology consists of three major steps: the first step aims to analyze and preprocess the students{\textquoteright} attributes/variables using a set of statistical analysis methods, and the second step consists of selecting the most important variables using different methods. The third step aims to construct different MLR models based on the selected variables and compare their performance using the k-fold cross-validation technique. The obtained results show that the model built using the variables selected by the Multivariate Adaptive Regression Splines (MARS) method outperforms the other constructed models. {\textcopyright} Springer Nature Switzerland AG 2020.}, keywords = {Data mining, Education computing, Educational data mining, Forecasting, Intelligent systems, K fold cross validations, Learning systems, Linear regression, Machine learning techniques, Multiple linear regressions, Multivariate adaptive regression splines, Performance prediction models, planning, Predictive analytics, Statistical analysis methods, Students, Supervised learning, Supervised machine learning, Sustainable development}, doi = {10.1007/978-3-030-36653-7_2}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078413089\&doi=10.1007\%2f978-3-030-36653-7_2\&partnerID=40\&md5=893772562739d5d20e6b6ae21ebae325}, author = {El Aissaoui, O. and El Alami El Madani, Y. and Oughdir, L. and Dakkak, A. and El Allioui, Y.} } @article {Kharbouch202086, title = {MyContraception: An Evidence-Based Contraception mPHR for Better Contraceptive Fit}, journal = {Advances in Intelligent Systems and Computing}, volume = {1161 AISC}, year = {2020}, note = {cited By 1}, pages = {86-94}, abstract = {The fulfillment of unmet needs for contraception can help women reach their reproductive goals. It was proven to have a significant impact on reducing the rates of unintended pregnancies, thereby reducing the morbidity and mortality resulting from these pregnancies and improving the lives of women and children in general. Therefore, there is a growing concern worldwide about contraception and women{\textquoteright}s knowledge of making an advised choice about it. In this respect, a growing number of apps are now available providing clinical resources, digital guides, or educational information concerning contraception, whether natural or modern contraception. However, vast amounts of these apps contain inaccurate sexual health facts and non-evidence based information concerning contraception. On these bases, and with respect to the needs of women to effectively prevent unintended pregnancies while conducting a stress-free healthy lifestyle, the World Health Organization (WHO) Medical Eligibility Criteria (MEC) for contraception{\textquoteright}s recommendations, and the results and recommendations of a field study conducted in the reproductive health center Les Oranges in Rabat to collect the app{\textquoteright}s requirements, we developed an Android app named {\textquoteleft}MyContraception{\textquoteright}.
Our solution is an evidence-based patient-centered contraceptive app that has been developed in an attempt to facilitate: (1) Seeking evidence-based information along with recommendations concerning the best contraceptive fit (according to one{\textquoteright}s medical characteristics, preferences and priorities) helping users make informed decisions about their contraceptive choices. (2) Monitoring one{\textquoteright}s own menstrual cycle, fertility window, contraceptive methods usage, and the correlation between these different elements and everyday symptoms in one app. (3) Keeping record of one{\textquoteright}s family medical history, medical appointments, analyses, diagnoses, procedures and notes within the same app. In future work, conducting an empirical evaluation of MyContraception solution is intended, to exhaustively examine the effects of this solution in improving the quality of patient-centered contraception care. {\textcopyright} 2020, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, keywords = {Clinical resources, Diagnosis, Eligibility criterion, Empirical evaluations, Healthy lifestyles, Information concerning, Information systems, Information use, Informed decision, Obstetrics, reproductive health, World Health Organization}, doi = {10.1007/978-3-030-45697-9_9}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085491736\&doi=10.1007\%2f978-3-030-45697-9_9\&partnerID=40\&md5=18f9987c1855619561fe9cc080957ade}, author = {Kharbouch, M. and Idri, A. and Rachad, T. and Alami, H. and Redman, L. and Stelate, Y.} } @conference {Amine2020, title = {New Network Slicing Scheme for UE Association Solution in 5G Ultra Dense HetNets}, booktitle = {IEEE International Conference on Communications}, volume = {2020-June}, year = {2020}, note = {cited By 5}, abstract = {Network slicing (NS) will have an essential role to enhance the isolation and the flexibility of the future generation of cellular networks (5G) with heterogeneous capabilities, notably in ultra-dense (UD) urban zones. Thus, it may be the principal component necessary to respond to 5G UD heterogeneous networks (UD-HetNets) technical requirements. The aim of this paper is to propose a new NS architecture to resolve user equipment (UE)-association problem in 5G UD-HetNets. Accordingly, we formulated the problem as a one-to-many matching game based on matching theory, while exploiting the isolation character of slicing to eliminate interferences between pico-cells and those among macro-cell and pico-cells. Next, we proposed the UE-slice association algorithm (U-S.AA) to find the stable matching among user equipments (UEs) and different network slices. Numerical simulation results validate our theoretical model, and prove the efficient of the proposed user-slice association solution to enhance the global network performance, respecting the UEs quality of service (QoS), as well as improving the energy efficiency (EE) of UEs. {\textcopyright} 2020 IEEE.}, keywords = {5G mobile communication systems, Association algorithms, Cellular network, energy efficiency, Future generations, Heterogeneous networks, Matching theory, Network performance, Principal Components, Stable matching, Technical requirement, Theoretical modeling}, doi = {10.1109/ICC40277.2020.9148844}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85089429385\&doi=10.1109\%2fICC40277.2020.9148844\&partnerID=40\&md5=1c1af15932312ae46b6294cfb76fd07d}, author = {Amine, M. and Kobbane, A. 
and Ben-Othman, J.} } @article {Laghridat2020, title = {A Novel Adaptive Active Disturbance Rejection Control Strategy to Improve the Stability and Robustness for a Wind Turbine Using a Doubly Fed Induction Generator}, journal = {Journal of Electrical and Computer Engineering}, volume = {2020}, year = {2020}, note = {cited By 8}, abstract = {A novel and robust active disturbance rejection control (ADRC) strategy for variable speed wind turbine systems using a doubly fed induction generator (DFIG) is presented in this paper. The DFIG is directly connected to the main utility grid by stator, and its rotor is connected through a back-to-back three phase power converter (AC/DC/AC). Due to the acoustic nature of wind and to ensure capturing maximum energy, a control strategy to extract the available maximum power from the wind turbine by using a maximum power point tracking (MPPT) algorithm is presented. Moreover, a pitch actuator system is used to control the blades{\textquoteright} pitch angle of the wind turbine in order to not exceed the wind turbine rated power value in case of strong wind speeds. Furthermore, the rotor-side converter is used to control the active and reactive powers generated by the DFIG. However, the grid-side converter is used to control the currents injected into the utility grid as well as to regulate the DC-link voltage. This paper aims to study and develop two control strategies for wind turbine system control: Classical control by proportional integral (PI) and the proposed linear active disturbance rejection control (LADRC). The main purpose here is to compare and evaluate the dynamical performances and sensitivity of these controllers to the DFIG parameter variation. Therefore, a series of simulations were carried out in the MATLAB/Simulink environment, and the obtained results have shown the effectiveness of the proposed strategy in terms of efficiency, rapidity, and robustness to internal and external disturbances. {\textcopyright} 2020 Hammadi Laghridat et al.}, keywords = {AC-AC power converters, Active disturbance rejection controls, Asynchronous generators, Disturbance rejection, Doubly fed induction generator (DFIG), Doubly fed induction generators, Electric fault currents, Electric machine control, Linear active disturbance rejection controls, MATLAB, MATLAB/Simulink environment, Maximum power point trackers, Maximum Power Point Tracking, Robustness (control systems), Sensitivity analysis, Three-phase power converter, Two term control systems, Variable speed wind turbines, Wind turbines}, doi = {10.1155/2020/9847628}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85083628356\&doi=10.1155\%2f2020\%2f9847628\&partnerID=40\&md5=b2314c4d19e3bac9f4e6e82dcf54f954}, author = {Laghridat, H. and Essadki, A. and Annoukoubi, M. and Nasser, T.} } @article {ElFazziki2020815, title = {A Novel Collaborative Filtering Approach Based on the Opposite Neighbors{\textquoteright} Preferences}, journal = {Advances in Intelligent Systems and Computing}, volume = {1076}, year = {2020}, note = {cited By 0}, pages = {815-824}, abstract = {Collaborative filtering (CF) has become an effective way to predict useful items. It is the most widespread recommendation technique. It relies on users who share similar tastes and preferences to suggest the items that they might be interested in. Despite its simplicity and justifiability, the collaborative filtering approach experiences many problems, including sparsity, gray sheep and scalability. 
These problems lead to deteriorating the accuracy of the obtained results. In this work, we present a novel collaborative filtering approach based on the opposite preferences of users. We focus on enhancing the accuracy of predictions and dealing with gray sheep problem by inferring new similar neighbors based on users who have dissimilar tastes and preferences. For instance, if a user X is dissimilar to a user Y then the user +X is similar to the user Y. The Experimental results performed on two datasets including MovieLens and FilmTrust show that our approach outperforms several baseline recommendation techniques. {\textcopyright} Springer Nature Singapore Pte Ltd. 2020.}, keywords = {Artificial intelligence, Collaborative filtering, Embedded systems, Movielens, Recommendation techniques, Similar neighbors}, doi = {10.1007/978-981-15-0947-6_77}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085185804\&doi=10.1007\%2f978-981-15-0947-6_77\&partnerID=40\&md5=68fdeb907c6bb3635b6c19f73a225586}, author = {El Fazziki, A. and El Aissaoui, O. and El Madani El Alami, Y. and Benbrahim, M. and El Allioui, Y.} } @conference {ElMrabtt2020, title = {Novel Convex Polyhedron Classifier for Sentiment Analysis}, booktitle = {Proceedings of 2020 5th International Conference on Cloud Computing and Artificial Intelligence: Technologies and Applications, CloudTech 2020}, year = {2020}, note = {cited By 1}, abstract = {In this paper, we propose a Novel Convex Polyhedron classifier (NCPC) based on the geometric concept convex hull. NCPC is basically a linear piecewise classifier (LPC). It partitions linearly non-separable data into various linearly separable subsets. For each of these subset of data, a linear hyperplane is used to classify them. We evaluate the performance of this classifier by combining it with two feature selection methods (Chi-squared and Anova F-value). Using two datasets, the results indicate that our proposed classifier outperforms other LPC-based classifiers. {\textcopyright} 2020 IEEE.}, keywords = {Artificial intelligence, Chi-Squared, Classification (of information), Cloud computing, Convex hull, Convex polyhedrons, F values, Feature selection methods, Geometric concepts, Geometry, Linearly separable, Piece-wise, Sentiment analysis}, doi = {10.1109/CloudTech49835.2020.9365906}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85102620927\&doi=10.1109\%2fCloudTech49835.2020.9365906\&partnerID=40\&md5=143fd1c523fe8ae24563a9b0482f632c}, author = {El Mrabtt, S. and Lazaar, M. and Al Achhab, M. and Omara, H.} } @conference {Illi2020, title = {On the Performance of Dual-Hop SWIPT-based Relaying System with Asymmetric Fading Conditions}, booktitle = {3rd International Conference on Advanced Communication Technologies and Networking, CommNet 2020}, year = {2020}, note = {cited By 1}, abstract = {This paper inspects the error rate and the capacity performance of a dual-hop energy harvesting (EH)-based fixed-gain amplify-and-forward relaying communication system, subject to fading impairments. We consider a source node (S) communicating with a destination node (D) via a relay node (R) equipped with a finite-size battery, which harvests energy from its received signal from S and uses it to power up its communication with D after amplification. Time-switching protocol is examined in the analysis for EH. In addition, the S-R and R-D hops are modeled by the Nakagami-m and $\alpha-μ$ fading models, respectively. 
A closed-form expression for the cumulative distribution function of the end-to-end signal-to-noise ratio is derived, based on which novel exact and asymptotic closed-form formulas for the average symbol error rate and average channel capacity are derived. The derived expressions are validated through Monte Carlo simulations. {\textcopyright} 2020 IEEE.}, keywords = {Amplify-and-forward relaying, Average channel capacities, Average symbol error rate (SER), Capacity performance, Closed-form expression, Cumulative distribution function, Destination nodes, Distribution functions, Energy harvesting, Fading conditions, Monte Carlo methods, Signal to noise ratio}, doi = {10.1109/CommNet49926.2020.9199629}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85093673625\&doi=10.1109\%2fCommNet49926.2020.9199629\&partnerID=40\&md5=1ab8ab84fab283bfc9161c2765dfe41a}, author = {Illi, E. and Bouanani, F.E. and Ayoub, F.} } @conference {Amri2020461, title = {Performing of users{\textquoteright} road safety at intelligent transportation systems}, booktitle = {Colloquium in Information Science and Technology, CIST}, volume = {2020-June}, year = {2020}, note = {cited By 1}, pages = {461-465}, abstract = {Using smart city technologies and technical advancements in Intelligent Transport Systems, this work aims to improve the safety of road users in different road environments. A new architecture of an intelligent transport system has been proposed in order to ensure the road safety in real time. The proposed Intelligent and Safe Transportation System (ISTS) consists of two components. The first is an intelligent safe traffic management system (ISTMS), the second is a safest route recommendation system (SRRS). The ISTMS uses road user{\textquoteright}s profile information and road environment data to generate and optimize a database of historical risk matrix. Security measures are also taken into account and optimized by the ISTMS in order to transform studied areas into safe ones. The SRRS uses user{\textquoteright}s profile information to recommend the safest itinerary and the most secure mode of transportation ensuring the user{\textquoteright}s safety. {\textcopyright} 2021 IEEE.}, keywords = {Accident prevention, Highway traffic control, Intelligent systems, Intelligent transport systems, Intelligent transportation systems, Intelligent vehicle highway systems, Motor transportation, Road environment, Roads and streets, Security measure, Technical advancement, Traffic management systems, Transportation system, User{\textquoteright}s profiles}, doi = {10.1109/CiSt49399.2021.9357169}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85103823225\&doi=10.1109\%2fCiSt49399.2021.9357169\&partnerID=40\&md5=b5765bcf8806d6b43b32d41dc1ea420d}, author = {Amri, S. and Naoum, M. and Lazaar, M. and Achhab, M.A.} } @article {Illi2020, title = {A PHY layer security analysis of a hybrid high throughput satellite with an optical feeder link}, journal = {IEEE Open Journal of the Communications Society}, volume = {1}, year = {2020}, note = {cited By 8}, abstract = {Hybrid terrestrial-satellite (HTS) communication systems have gained a tremendous amount of interest recently due to the high demand for global high data rates. Conventional satellite communications operate in the conventional Ku (12 GHz) and Ka (26.5-40 GHz) radio-frequency bands for assessing the feeder link, between the ground gateway and the satellite. 
Nevertheless, with the aim to provide hundreds of Mbps of throughput per each user, free-space optical (FSO) feeder links have been proposed to fulfill these high data rates requirements. In this paper, we investigate the physical layer security performance for a hybrid very high throughput satellite communication system with an FSO feeder link. In particular, the satellite receives the incoming optical wave from an appropriate optical ground station, carrying the data symbols of N users through various optical apertures and combines them using the selection combining technique. Henceforth, the decoded and regenerated information signals of the N users are zero-forcing (ZF) precoded in order to cancel the interbeam interference at the end-users. The communication is performed under the presence of malicious eavesdroppers nodes at both hops. Statistical properties of the signal-to-noise ratio of the legitimate and wiretap links at each hop are derived, based on which the intercept probability metric is evaluated. The derived results show that above a certain number of optical apertures, the secrecy level is not improved further. Also, the system{\textquoteright}s secrecy is improved using ZF precoding compared to the no-precoding scenario for some specific nodes{\textquoteright} positions. All the derived analytical expressions are validated through Monte Carlo simulations. {\textcopyright} 2021 Institute of Electrical and Electronics Engineers Inc.. All rights reserved.}, keywords = {Analytical expressions, Feeding, Information signals, Inter-beam interference, Monte Carlo methods, Optical ground station, Physical layer, Physical layer security, Radio frequency bands, Satellite communication systems, Satellite communications, Satellites, Signal to noise ratio, Statistical properties}, doi = {10.1109/OJCOMS.2020.2995327}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85098417335\&doi=10.1109\%2fOJCOMS.2020.2995327\&partnerID=40\&md5=2ca3d202a9febc6959808157b5759568}, author = {Illi, E. and El Bouanani, F. and Ayoub, F. and Alouini, M.-S.} } @article {Bouabdellah2020704, title = {A PHY Layer Security Analysis of Uplink Cooperative Jamming-Based Underlay CRNs with Multi-Eavesdroppers}, journal = {IEEE Transactions on Cognitive Communications and Networking}, volume = {6}, number = {2}, year = {2020}, note = {cited By 17}, pages = {704-717}, abstract = {In this paper, the physical layer security of a dual-hop underlay uplink cognitive radio network is investigated over Nakagami-m fading channels. Specifically, multiple secondary sources (S_i)_1{\l}eq i{\l}eq N are taking turns in accessing the licensed spectrum of the primary users and communicating with a multi-antenna secondary base station (D) through the aid of a multi-antenna relay R in the presence of M eavesdroppers (E_k)_1{\l}eq k{\l}eq M that are also equipped with multiple antennas. Among the remaining nodes, one jammer is randomly selected to transmit an artificial noise to disrupt all the eavesdroppers that are attempting to intercept the communication of the legitimate links, i.e., S_i-R and R-D. The received signals at each node are combined using maximum-ratio combining. Secrecy analysis is provided by deriving closed-form and asymptotic expressions for the secrecy outage probability. The impact of several key parameters on the system{\textquoteright}s secrecy, e.g., transmit power of the sources, number of eavesdroppers, maximum tolerated interference power, and the number of diversity branches is investigated. 
Importantly, by considering two scenarios, namely (i) absence and (ii) presence of a friendly jammer, new insights are obtained for the considered communication system. Especially, we tend to answer to the following question: Can better secrecy be achieved without jamming by considering a single antenna at eavesdroppers and multiple-ones at the legitimate users (i.e., relay and end-user) rather than sending permanently an artificial noise and considering that both the relay and the destination are equipped with a single antenna, while multiple antennas are used by the eavesdroppers? The obtained results are corroborated through Monte Carlo simulation and show that the system{\textquoteright}s security can be enhanced by adjusting the aforementioned parameters. {\textcopyright} 2015 IEEE.}, keywords = {Antennas, Asymptotic expressions, Cognitive radio, Cognitive radio network, Fading (radio), Fading channels, Intelligent systems, Jamming, Maximum ratio combining, Monte Carlo methods, Multi eavesdroppers, Multi-antenna relay, Nakagami-m fading channels, Physical layer, Physical layer security, Secrecy outage probabilities, Secure communication, Silicon}, doi = {10.1109/TCCN.2019.2957081}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85083785882\&doi=10.1109\%2fTCCN.2019.2957081\&partnerID=40\&md5=b3545ea32455c8abbf9fffef83e08cf0}, author = {Bouabdellah, M. and El Bouanani, F. and Alouini, M.-S.} } @conference {Elmidaoui2020, title = {Predicting software maintainability using ensemble techniques and stacked generalization}, booktitle = {CEUR Workshop Proceedings}, volume = {2725}, year = {2020}, note = {cited By 1}, abstract = {The prediction of software maintainability has emerged as an important research topic to address industry expectations for reducing costs, in particular maintenance costs. In the last decades, many studies have used single techniques to predict software maintainability but there is no agreement as to which technique can achieve the best prediction. Ensemble techniques, which combine two or more techniques, have been investigated in recent years. This study investigates ensemble techniques (homogeneous as well as heterogeneous) for predicting maintainability in terms of line code changes. To this end, well-known homogeneous ensembles such as Bagging, Boosting, Extra Trees, Gradient Boosting, and Random Forest are investigated first. Then the stacked generalization method is used to construct heterogeneous ensembles by combining the most accurate ones per dataset. The empirical results suggest that Gradient Boosting and Extra Trees are the best ensembles for all datasets, since they ranked first and second, respectively. Moreover, the findings of the evaluation of heterogeneous ensembles constructed using stacked generalization showed that they gave better prediction accuracy compared to all homogeneous ensembles. Copyright {\textcopyright} 2020 for this paper by its authors.}, keywords = {Decision trees, Ensemble techniques, Forecasting, Forestry, Gradient boosting, Heterogeneous ensembles, Maintainability, Maintenance cost, Prediction accuracy, Research topics, Software engineering, Software maintainability, Stacked generalization}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85098182236\&partnerID=40\&md5=32bab56e3a64ff6efa7e8717d9ee67c4}, author = {Elmidaoui, S. and Cheikhi, L. and Idri, A. 
and Abran, A.} } @article {Sayagh2020646, title = {Software Configuration Engineering in Practice Interviews, Survey, and Systematic Literature Review}, journal = {IEEE Transactions on Software Engineering}, volume = {46}, number = {6}, year = {2020}, note = {cited By 12}, pages = {646-673}, abstract = {Modern software applications are adapted to different situations (e.g., memory limits, enabling/disabling features, database credentials) by changing the values of configuration options, without any source code modifications. According to several studies, this flexibility is expensive as configuration failures represent one of the most common types of software failures. They are also hard to debug and resolve as they require a lot of effort to detect which options are misconfigured among a large number of configuration options and values, while comprehension of the code also is hampered by sprinkling conditional checks of the values of configuration options. Although researchers have proposed various approaches to help debug or prevent configuration failures, especially from the end users{\textquoteright} perspective, this paper takes a step back to understand the process required by practitioners to engineer the run-time configuration options in their source code, the challenges they experience as well as best practices that they have or could adopt. By interviewing 14 software engineering experts, followed by a large survey on 229 Java software engineers, we identified 9 major activities related to configuration engineering, 22 challenges faced by developers, and 24 expert recommendations to improve software configuration quality. We complemented this study by a systematic literature review to enrich the experts{\textquoteright} recommendations, and to identify possible solutions discussed and evaluated by the research community for the developers{\textquoteright} problems and challenges. We find that developers face a variety of challenges for all nine configuration engineering activities, starting from the creation of options, which generally is not planned beforehand and increases the complexity of a software system, to the non-trivial comprehension and debugging of configurations, and ending with the risky maintenance of configuration options, since developers avoid touching and changing configuration options in a mature system. We also find that researchers thus far focus primarily on testing and debugging configuration failures, leaving a large range of opportunities for future work. {\textcopyright} 1976-2012 IEEE.}, keywords = {Application programs, Bibliographies, Codes (symbols), Errors, Facebook, Interviews, Program debugging, Software algorithms, Software systems, Surveys, Systematics}, doi = {10.1109/TSE.2018.2867847}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85052624554\&doi=10.1109\%2fTSE.2018.2867847\&partnerID=40\&md5=f1b2d51b59522e24e5c52f064a62d00b}, author = {Sayagh, M. and Kerzazi, N. and Adams, B. 
and Petrillo, F.} } @article {Kharbouch2020894, title = {Software Requirement Catalog on Acceptability, Usability, Internationalization and Sustainability for Contraception mPHRs}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {12252 LNCS}, year = {2020}, note = {cited By 0}, pages = {894-905}, abstract = {Contraception Mobile Personal Health Records (mPHRs) are efficient mobile health applications (apps) to increase awareness about fertility and contraception and to allow women to access, track, manage, and share their health data with healthcare providers. This paper aims to develop a requirements catalog, according to standards, guidelines, and relevant literature on e-health technology and psychology. The requirements covered by this catalog are Acceptability, Usability, Sustainability, and Internationalization (i18n). This catalog can be very useful for developing, evaluating, and auditing contraceptive apps, as well as helping stakeholders and developers identify potential requirements for their mPHRs to improve them. {\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {Health, Health care providers, Health data, Mobile health application, Personal health record, Software requirements, Sustainable development}, doi = {10.1007/978-3-030-58811-3_63}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85092222585\&doi=10.1007\%2f978-3-030-58811-3_63\&partnerID=40\&md5=119f051c4582d8079abdb813399d89d1}, author = {Kharbouch, M. and Idri, A. and Redman, L. and Alami, H. and Fernandez-Aleman, J.L. and Toval, A.} } @conference {Abdellah2020568, title = {Spelling correction for the Arabic language-space deletion errors}, booktitle = {Procedia Computer Science}, volume = {177}, year = {2020}, note = {cited By 5}, pages = {568-574}, abstract = {The automatic correction of spelling errors is one of the most used disciplines in different automatic language processing systems; therefore, we address in this article space deletion errors.
This type of error was not taken into consideration in the classical Levenshtein algorithm. In this paper, we propose a new approach for the correction of this spelling errors type. This new approach uses an adapted version of the Levenshtein distance to correct this error. For the evaluation of our new approach we used a lexicon extracted from Al Watane newspaper articles. The results showed the importance of our approach. {\textcopyright} 2020 The Authors. Published by Elsevier B.V.}, keywords = {Arabic languages, Automatic corrections, Computer science, Computers, Errors, Language processing systems, Levenshtein distance, New approaches, space deletion errors, Spelling correction, spelling errors}, doi = {10.1016/j.procs.2020.10.080}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85099878597\&doi=10.1016\%2fj.procs.2020.10.080\&partnerID=40\&md5=696fa31a7debf0be7759cbf273e88803}, author = {Abdellah, Y. and Lhoussain, A.S. and Hicham, G. and Mohamed, N.} } @article {Kerzazi2020472, title = {Teaching Pedigree Analysis and Risk Calculation for Diagnosis Purposes of Genetic Disease}, journal = {Advances in Intelligent Systems and Computing}, volume = {1159 AISC}, year = {2020}, note = {cited By 0}, pages = {472-485}, abstract = {A Faculty of Medicine needs to respond appropriately to the rapid changes in medical education, to ensure that future geneticists are well trained to meet the challenges of medical practice. This paper presents five core requirements for a tool integrating new methodologies for teaching graduate-level a course of medical genetics. Our methodology presented here focuses on exploiting pedigree analysis in the field of medical genetics, particularly to explore them in the diagnosis of genetic diseases. Because of its important relevance as one of the skills that future medical practitioners must have, we designed this approach as a learning process supported by a tool. We also discuss an ongoing supported effort at utilizing common tools and IT resources to make it easier for the learners to reach their expected skill levels and provide them with a rich learning experience. We find that not only do our tool prototype has a positive impact on the learning process, practitioners also have expectations to feed a bio-bank of medical cases as inputs for future empirical studies. {\textcopyright} 2020, The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG.}, keywords = {Chromosomes, Diagnosis, Empirical studies, Information systems, Information use, Learning process, Learning systems, Medical education, Medical genetics, Medical practice, Medical practitioner, Pathology, Pedigree analysis, Rich learning experiences, Risk analysis, Risk assessment, Risk calculation}, doi = {10.1007/978-3-030-45688-7_48}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85085529134\&doi=10.1007\%2f978-3-030-45688-7_48\&partnerID=40\&md5=7e1e7a85185ad292f5e21b992eefe113}, author = {Kerzazi, N. and Tajir, M. and Boulouiz, R. and Bellaoui, M. and Azizi, M.} } @article {M{\textquoteright}rabet2020121, title = {A Threshold Decoding Algorithm for Non-binary OSMLD Codes}, journal = {Communications in Computer and Information Science}, volume = {1264}, year = {2020}, note = {cited By 0}, pages = {121-133}, abstract = {In this paper, a non-binary version of the Soft-Input Hard-Output Threshold Decoder (TD) is proposed. Threshold decoding was initially devised by Massey for the binary case. 
This work is the continuity of our previous work, where we presented the Majority Logic Decoding (MLGD) algorithm for non-binary codes. Like the (MLGD), the proposed decoder has low complexity, but it is more suited for OSMLD codes with a large orthogonal structure in their dual codes, such as finite geometry codes. The performances of our decoder are shown in terms of bit, symbol, and block error rates. The obtained results are very encouraging. The obtained results of the TD show an amelioration compared to the MLGD, due to the use of the information received from the channel. {\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {Block error rates, Convolutional codes, Data communication systems, Decoding, Finite geometry, Majority logic, Majority logic decoding, Non-binary codes, Orthogonal structures, Security of data, Soft inputs, Threshold decoders, Threshold decoding}, doi = {10.1007/978-3-030-61143-9_11}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097405119\&doi=10.1007\%2f978-3-030-61143-9_11\&partnerID=40\&md5=6ce0a05b3ed02c120b504b2bfd988f93}, author = {M{\textquoteright}rabet, Z. and Ayoub, F. and Belkasmi, M.} } @article {Aissaoui2020117, title = {Toward a hybrid machine learning approach for extracting and clustering learners{\textquoteright} behaviours in adaptive educational system}, journal = {International Journal of Computing Science and Mathematics}, volume = {12}, number = {2}, year = {2020}, note = {cited By 0}, pages = {117-131}, abstract = {The learning style is a vital learner{\textquoteright}s characteristic that must be considered while recommending learning materials. In this paper we have proposed an approach to identify learning styles automatically. The first step of the proposed approach aims to preprocess the data extracted from the log file of the E-learning environment and capture the learners{\textquoteright} sequences. The captured learners{\textquoteright} sequences were given as an input to the K-means clustering algorithm to group them into sixteen clusters according to the FSLSM. Then the naive Bayes classifier was used to predict the learning style of a student in real time. To perform our approach, we used a real dataset extracted from an e-learning system{\textquoteright}s log file, and in order to evaluate the performance of the used classifier, the confusion matrix method was used. The obtained results demonstrate that our approach yields excellent results. Copyright {\textcopyright} 2020 Inderscience Enterprises Ltd.}, keywords = {Adaptive systems, Classification (of information), Classifiers, Computer aided instruction, Confusion matrices, e-learning, E-learning environment, Educational systems, Hybrid machine learning, K-means clustering, Learning materials, Learning Style, Machine learning, Naive Bayes classifiers, Preprocess}, doi = {10.1504/IJCSM.2020.111113}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85096034864\&doi=10.1504\%2fIJCSM.2020.111113\&partnerID=40\&md5=e39786511c32be21cc0fef5be68a0822}, author = {Aissaoui, O.E. and El Alami El Madani, Y. and Oughdir, L. and Dakkak, A. 
and El Allioui, Y.} } @conference {Reda202091, title = {Towards a data quality assessment in big data}, booktitle = {ACM International Conference Proceeding Series}, year = {2020}, note = {cited By 4}, pages = {91-96}, abstract = {In recent years, as more and more data sources have become available and the volumes of data potentially accessible have increased, the assessment of data quality has taken a central role whether at the academic, professional or any other sector. Given that users are often concerned with the need to filter a large amount of data to better satisfy their requirements and needs, and that data analysis can be based on inaccurate, incomplete, ambiguous, duplicated and of poor quality, it makes everyone wonder what the results of these analyses will really be like. However, there is a very complex process involved in the identification of new, valid, potentially useful and meaningful data from a large data collection and various information systems, and is critically dependent on a number of measures to be developed to ensure data quality. To this end, the main objective of this paper is to introduce a general study on data quality related with big data, by providing what other researchers came up with on that subject. The paper will be finalized by a comparative study between the different existing data quality models. {\textcopyright} 2020 ACM.}, keywords = {Big data, Comparative studies, Complex Processes, data quality, Data quality assessment, Data quality models, Data reduction, Data-sources, Intelligent systems, Large amounts, Large data}, doi = {10.1145/3419604.3419803}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85096426287\&doi=10.1145\%2f3419604.3419803\&partnerID=40\&md5=7542ef0e44f4d94b808c64ce466a5eca}, author = {Reda, O. and Sassi, I. and Zellou, A. and Anter, S.} } @article {Mrhar2020535, title = {Towards a semantic integration of data from learning platforms}, journal = {IAES International Journal of Artificial Intelligence}, volume = {9}, number = {3}, year = {2020}, note = {cited By 2}, pages = {535-544}, abstract = {Nowadays, there is a huge production of Massive Open Online Courses MOOCs from universities around the world. The enrolled learners in MOOCs skyrocketed along with the number of the offered online courses. Of late, several universities scrambled to integrate MOOCs in their learning strategy. However, the majority of the universities are facing two major issues: firstly, because of the heterogeneity of the platforms used (e-learning and MOOC platforms), they are unable to establish a communication between the formal and non-formal system; secondly, they are incapable to exploit the feedbacks of the learners in a non-formal learning to personalize the learning according to the learner{\textquoteright}s profile. Indeed, the educational platforms contain an extremely large number of data that are stored in different formats and in different places. In order to have an overview of all data related to their students from various educational heterogeneous platforms, the collection and integration of these heterogeneous data in a formal consolidated system is needed. The principal core of this system is the integration layer which is the purpose of this paper. In this paper, a semantic integration system is proposed. 
It allows us to extract, map and integrate data from heterogeneous learning platforms {\textquotedblleft}MOOCs platforms, e-learning platforms{\textquotedblright} by solving all semantic conflicts existing between these sources. Besides, we use different learning algorithms (Long short-term memory LSTM, Conditional Random Field CRF) to learn and recognize the mapping between data source and domain ontology. {\textcopyright} 2020, Institute of Advanced Engineering and Science. All rights reserved.}, doi = {10.11591/ijai.v9.i3.pp535-544}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85090554399\&doi=10.11591\%2fijai.v9.i3.pp535-544\&partnerID=40\&md5=786ffdd85ffa5b0112c0728e4bb50266}, author = {Mrhar, K. and Douimi, O. and Abik, M. and Benabdellah, N.C.} } @article {Mrhar20202934, title = {Towards optimize-ESA for text semantic similarity: A case study of biomedical text}, journal = {International Journal of Electrical and Computer Engineering}, volume = {10}, number = {3}, year = {2020}, note = {cited By 1}, pages = {2934-2943}, abstract = {Explicit Semantic Analysis (ESA) is an approach to measure the semantic relatedness between terms or documents based on similarities to documents of a references corpus usually Wikipedia. ESA usage has received tremendous attention in the field of natural language processing NLP and information retrieval. However, ESA utilizes a huge Wikipedia index matrix in its interpretation by multiplying a large matrix by a term vector to produce a high-dimensional vector. Consequently, the ESA process is too expensive in interpretation and similarity steps. Therefore, the efficiency of ESA will slow down because we lose a lot of time in unnecessary operations. This paper propose enhancements to ESA called optimize-ESA that reduce the dimension at the interpretation stage by computing the semantic similarity in a specific domain. The experimental results show clearly that our method correlates much better with human judgement than the full version ESA approach. Copyright {\textcopyright} 2020 Institute of Advanced Engineering and Science. All rights reserved.}, doi = {10.11591/ijece.v10i3.pp2934-2943}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079383531\&doi=10.11591\%2fijece.v10i3.pp2934-2943\&partnerID=40\&md5=2965a06637b8b80abe9288b66e1082e7}, author = {Mrhar, K. and Abik, M.} } @conference {Biallaten2020, title = {Truss Optimization using the Simulated Annealing Algorithm}, booktitle = {ACM International Conference Proceeding Series}, year = {2020}, note = {cited By 0}, abstract = {This document presents the results of our work aiming to create a tool for generating trusses. The generated structures undergo an optimizing process to be in the best possible form. The automatic generation of the truss structure is done using an algorithm adapted to the type of an initial design domain. Then, the optimization process is launched to resize the generated structure by minimizing the compliance under constant volume constraint. The size optimization is a question of looking for the best distribution of the cross-sections checking the objective of optimization and guaranteeing the stability of the final solution. We chose to implement this optimization using the simulated annealing algorithm. So, we developed a hybrid method based on the simulated annealing algorithm and an algorithm of bars elimination. 
{\textcopyright} 2020 ACM.}, keywords = {Automatic Generation, Constant volumes, Constrained optimization, Hybrid method, Information systems, Information use, Initial design, Simulated annealing, Simulated annealing algorithms, Size optimization, Truss optimization, Truss structure, Trusses}, doi = {10.1145/3447568.3448534}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85103615098\&doi=10.1145\%2f3447568.3448534\&partnerID=40\&md5=2edcc24e3adf365097bd676455eb98f9}, author = {Biallaten, Z. and Chiheb, R. and El Afia, A.} } @conference {Maha2020, title = {The Two Phases Method for operating rooms planning and scheduling}, booktitle = {2020 IEEE International Conference on Technology Management, Operations and Decisions, ICTMOD 2020}, year = {2020}, note = {cited By 2}, abstract = {Over the last three or four decades, researchers have approached numerous optimization problems in healthcare. Hospital logistics, which must be organized and structured in order to secure patient satisfaction in terms of quality, quantity, time, security, and least cost, forms part of the quest for global performance. According to the literature review, the problem of operating rooms planning and scheduling involves different conflicting objectives while considering constraints on the availability of rooms, patients, and doctors. In this paper, we propose the Two Phases Method (TPM), which is a general technique suited to solving multi-objective combinatorial optimization (MOCO) problems. To the best of our knowledge, TPM has never been applied to solve the operating room planning and scheduling problem. We therefore develop the TPM to resolve the cited issue, while focusing on optimizing both total completion time and patients{\textquoteright} waiting time. {\textcopyright} 2020 IEEE.}, keywords = {Combinatorial Optimization, Conflicting objectives, Constraint satisfaction problems, Decision making, Hospital logistics, Industrial management, Literature reviews, Multiobjective combinatorial optimization, Operating rooms, Optimization problems, Patient satisfaction, Planning and scheduling, Scheduling, Total completion time}, doi = {10.1109/ICTMOD49425.2020.9380584}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85103741668\&doi=10.1109\%2fICTMOD49425.2020.9380584\&partnerID=40\&md5=a5086170077f1fe352eb34de4e820aac}, author = {Maha, T. and Achchab, S. and Omar, S.} } @article {Amine2020, title = {Two-sided matching framework for optimal user association in 5G multi-RAT UDNs}, journal = {International Journal of Communication Systems}, volume = {33}, number = {8}, year = {2020}, note = {cited By 2}, abstract = {Recently, academic and industrial research communities have been paying more explicit attention to 5G multiple radio access technology ultra-dense networks (5G multi-RAT UDNs) for boosting network capacity, especially in UD urban zones. To this end, in this paper, we tackle the user association problem in 5G multi-RAT UDNs. By considering the decoupled uplink/downlink access (DUDA), we divide our user association problem into two distinct subproblems representing, respectively, the uplink and the downlink channels. Next, we formulate each one as a nonlinear optimization problem with binary variables. When solving them, however, we are constrained by the high complexity, as well as the limited feasibility, of centralized user association schemes. 
Thus, to resolve our user association problem in a reasonable time and in a distributed manner, we formulate each subproblem as a many-to-one matching game based on matching theory. Next, we provide two fully distributed association algorithms to compute the uplink and downlink stable matchings among user equipments (UEs) and base stations (BSs). Simulation results corroborate our theoretical model and show the effectiveness of our approach in terms of overall network performance, quality of service (QoS), and energy efficiency (EE) of UEs. {\textcopyright} 2020 John Wiley \& Sons, Ltd.}, keywords = {5G mobile communication systems, 5G multi-RAT UDNs, Association algorithms, decoupled uplink/downlink access (DUDA), energy efficiency, Game theory, Industrial research, Matching game, Matching theory, Network performance, Nonlinear programming, Optimization, Quality of service, Rats, Stable matching}, doi = {10.1002/dac.4346}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078931379\&doi=10.1002\%2fdac.4346\&partnerID=40\&md5=159a3a40021d6b340c2873e4db73e2ac}, author = {Amine, M. and Kobbane, A. and Ben-Othman, J. and Walid, A.} } @conference {Lhazmir2020, title = {UAV for Wireless Power Transfer in IoT Networks: A GMDP approach}, booktitle = {IEEE International Conference on Communications}, volume = {2020-June}, year = {2020}, note = {cited By 3}, abstract = {Unmanned aerial vehicles (UAVs) are a promising technology employed as moving aggregators and wireless power transmitters for IoT networks. In this paper, we consider a UAV-IoT wireless energy and data transmission system and investigate the decision-making problem. We aim at optimizing the nodes{\textquoteright} utilities by defining a good packet delivery and energy transfer policy according to the system state. We formulate the problem as a Markov Decision Process (MDP) to tackle the successive decision issues. As the MDP formalism reaches its limits when the neighbors{\textquoteright} interactions are considered, we reformulate the problem as a Graph-based MDP (GMDP). We then propose a Mean-Field Approximation (MFA) algorithm to find a solution. The simulation results show that our framework achieves a good analysis of the system behavior. {\textcopyright} 2020 IEEE.}, keywords = {Antennas, Approximation algorithms, Behavioral research, Decision making, Decision-making problem, Energy transfer, Graphic methods, Inductive power transmission, Internet of things, Markov Decision Processes, Markov processes, Mean field approximation, Packet Delivery, System behaviors, System state, Unmanned aerial vehicles (UAV), Wireless energy, Wireless power}, doi = {10.1109/ICC40277.2020.9148956}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85089418365\&doi=10.1109\%2fICC40277.2020.9148956\&partnerID=40\&md5=a3d1ed58b9a7436f2191be8372102f51}, author = {Lhazmir, S. and Oualhaj, O.A. and Kobbane, A. and Amlioud, E.M. and Ben-Othman, J.} } @article {AlAfandy2020770, title = {Using classic networks for classifying remote sensing images: Comparative study}, journal = {Advances in Science, Technology and Engineering Systems}, volume = {5}, number = {5}, year = {2020}, note = {cited By 3}, pages = {770-780}, abstract = {This paper presents a comparative study of using classic deep convolution networks for remote sensing image classification. Four deep convolution models are used in this comparative study: the DenseNet 169, the NASNet Mobile, the VGG 16, and the ResNet 50 models. 
These convolution models are based on the use of ImageNet pre-trained weights and transfer learning, followed by the addition of a fully connected layer compatible with the classes of the used dataset. Two datasets are used in this comparison: the UC Merced land use dataset and the SIRI-WHU dataset. The comparison is based on inspecting the learning curves to determine how well the model trains and on calculating the overall accuracy, which determines the model performance. The comparison shows that the ResNet 50 model achieves the highest overall accuracy and the NASNet Mobile model the lowest overall accuracy in this study. The DenseNet 169 model has a slightly higher overall accuracy than the VGG 16 model. {\textcopyright} 2020 ASTES Publishers. All rights reserved.}, doi = {10.25046/AJ050594}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85095877228\&doi=10.25046\%2fAJ050594\&partnerID=40\&md5=763c007cae5730097994ccc641ce3f60}, author = {AlAfandy, K.A. and Omara, H. and Lazaar, M. and Achhab, M.A.} } @article {Sayagh20201259, title = {What should your run-time configuration framework do to help developers?}, journal = {Empirical Software Engineering}, volume = {25}, number = {2}, year = {2020}, note = {cited By 2}, pages = {1259-1293}, abstract = {The users or deployment engineers of a software system can adapt such a system to a wide range of deployment and usage scenarios by changing the value of configuration options, for example by disabling unnecessary features, tweaking performance-related parameters or specifying GUI preferences. However, the literature agrees that the flexibility of such options comes at a price: misconfigured options can lead a software system to crash in the production environment, while even in the absence of such configuration errors, a large number of configuration options makes a software system more complicated to deploy and use. In earlier work, we also found that developers who intend to make their application configurable face 22 challenges that impact their configuration engineering activities, ranging from technical to management-related or even inherent to the domain of configuration engineering. In this paper, we use a prototyping approach to derive and empirically evaluate requirements for tool support able to deal with 13 (primarily technical) configuration engineering challenges. In particular, via a set of interviews with domain experts, we identify four requirements by soliciting feedback on an incrementally evolving prototype. The resulting {\textquotedblleft}Config2Code{\textquotedblright} prototype, which implements the four requirements, is then empirically evaluated via a user study involving 55 participants that comprises 10 typical configuration engineering tasks, ranging from the creation, comprehension, refactoring, and reviewing of configuration options to the quality assurance of options and debugging of configuration failures. A configuration framework satisfying the four requirements enables developers to perform more accurately and more swiftly in 70\% and 60\% (respectively) of the configuration engineering tasks than a state-of-the-practice framework not satisfying the requirements. Furthermore, such a framework allows the time taken for these tasks to be reduced by up to 94.62\%, being slower for only one task. 
{\textcopyright} 2020, Springer Science+Business Media, LLC, part of Springer Nature.}, keywords = {Computer software, Empirical studies, Engineering activities, Engineering challenges, Production environments, Quality assurance, Quality control, Run-time configuration, Run-time software, State of the practice, User study}, doi = {10.1007/s10664-019-09790-x}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078290351\&doi=10.1007\%2fs10664-019-09790-x\&partnerID=40\&md5=16acb880be24429edc0728ccce2169e2}, author = {Sayagh, M. and Kerzazi, N. and Petrillo, F. and Bennani, K. and Adams, B.} } @article {Berradi2020677, title = {Wireless network stability enhancement based on spatial dependency}, journal = {Lecture Notes in Electrical Engineering}, volume = {684 LNEE}, year = {2020}, note = {cited By 0}, pages = {677-686}, abstract = {The ad hoc network is a part of the IoT environment and includes several types of networks, such as MANETs. It uses different categories of routing protocols, but the OLSR routing protocol is the best suited for MANETs. The mobility concept has an impact on the evolution of network performance. In the OLSR routing protocol, this mobility influences the choice of the MultiPoint Relay (MPR). In this paper, the main objective is to develop an algorithm enhancing the MPR selection process in such networks. This algorithm is based on the Average Spatial Dependency {\textquotedblleft}ASD{\textquotedblright} metric, derived from the average Relative Speed {\textquotedblleft}RS{\textquotedblright} and the average Relative Acceleration {\textquotedblleft}RA{\textquotedblright}. Each node shares these values with its neighbors using the messages of the OLSR routing protocol (Hello and TC messages). Furthermore, if nodes have equal reachability, the highest ASD value is used as the criterion for choosing their MPR set. In the simulation, we applied the Manhattan mobility model to the MANET network and used the NS3 simulator. The results of this simulation show that the mobility concept can change network performance in terms of packet loss, end-to-end delay, and throughput. {\textcopyright} The Editor(s) (if applicable) and The Author(s), under exclusive license to Springer Nature Switzerland AG 2020.}, keywords = {End to end delay, Manhattan mobility models, Mobile ad hoc networks, Mpr selections, multipoint relays, Network performance, Network stability, Relative acceleration, Relative speed, Routing protocols, Spatial dependencies}, doi = {10.1007/978-3-030-53187-4_74}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85089717007\&doi=10.1007\%2f978-3-030-53187-4_74\&partnerID=40\&md5=845455db1c85cc5d0e34104203afb7ba}, author = {Berradi, H. and Habbani, A. and Benjbara, C. and Mouchfiq, N. and Amraoui, H.} } @conference {ElOuahabi2019, title = {Bandwidth Enhancement of Complementary Split Ring Based Antenna}, booktitle = {International Symposium on Advanced Electrical and Communication Technologies, ISAECT 2018 - Proceedings}, year = {2019}, note = {cited By 0}, doi = {10.1109/ISAECT.2018.8618820}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062440605\&doi=10.1109\%2fISAECT.2018.8618820\&partnerID=40\&md5=40529d3f58aa8740187c10d6c4f1f5f5}, author = {El Ouahabi, K. and Roky, K. and El Mrabet, O. and Aznabet, M. 
and Ennasar, M.A.} } @conference {Assila2019, title = {Caching as a Service in 5G Networks: Intelligent Transport and Video on Demand Scenarios}, booktitle = {2018 IEEE Global Communications Conference, GLOBECOM 2018 - Proceedings}, year = {2019}, note = {cited By 0}, doi = {10.1109/GLOCOM.2018.8648110}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063521486\&doi=10.1109\%2fGLOCOM.2018.8648110\&partnerID=40\&md5=b7935edacd24dfb7acbd2e5dbe0daed6}, author = {Assila, B. and Kobbane, A. and El Koutbi, M. and Ben-Othman, J. and Mokdad, L.} } @conference {Aissaoui201987, title = {Combining supervised and unsupervised machine learning algorithms to predict the learners{\textquoteright} learning styles}, booktitle = {Procedia Computer Science}, volume = {148}, year = {2019}, note = {cited By 0}, pages = {87-96}, doi = {10.1016/j.procs.2019.01.012}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062675875\&doi=10.1016\%2fj.procs.2019.01.012\&partnerID=40\&md5=bac9366951ce3839b307d3270ef38ae2}, author = {Aissaoui, O.E. and El Madani, Y.E.A. and Oughdir, L. and Allioui, Y.E.} } @conference {Ennya2019, title = {Computing Tasks Distribution in Fog Computing: Coalition Game Model}, booktitle = {Proceedings - 2018 International Conference on Wireless Networks and Mobile Communications, WINCOM 2018}, year = {2019}, note = {cited By 0}, doi = {10.1109/WINCOM.2018.8629587}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062835283\&doi=10.1109\%2fWINCOM.2018.8629587\&partnerID=40\&md5=6f05637837203320cf2f7daa8f8c242d}, author = {Ennya, Z. and Hadi, M.Y. and Abouaomar, A.} } @conference {Lakhili201912, title = {Deformable 3D Shape Classification Using 3D Racah Moments and Deep Neural Networks}, booktitle = {Procedia Computer Science}, volume = {148}, year = {2019}, note = {cited By 0}, pages = {12-20}, doi = {10.1016/j.procs.2019.01.002}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062651075\&doi=10.1016\%2fj.procs.2019.01.002\&partnerID=40\&md5=4a812fcbc857dbf995b08a055c11fc9d}, author = {Lakhili, Z. and El Alami, A. and Mesbah, A. and Berrahou, A. and Qjidaa, H.} } @conference {Haddad2019, title = {Efficient Stacked Cylindrical Dielectric Resonator Antenna for Anticollision Short Range Radar at 79GHz}, booktitle = {International Symposium on Advanced Electrical and Communication Technologies, ISAECT 2018 - Proceedings}, year = {2019}, note = {cited By 0}, doi = {10.1109/ISAECT.2018.8618833}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062468314\&doi=10.1109\%2fISAECT.2018.8618833\&partnerID=40\&md5=0e4f2684dccbbbf03196e55a1fa16de3}, author = {Haddad, A. and Aoutoul, M. and Rais, K. and Faqir, M. and Essaaidi, M. and Lakssir, B. and Moukhtafi, F.E. and Jouali, R.} } @article {ElKoutbi2019, title = {Empirical evaluation of an entropy-based approach to estimation variation of software development effort}, journal = {Journal of Software: Evolution and Process}, volume = {31}, number = {3}, year = {2019}, note = {cited By 0}, doi = {10.1002/smr.2149}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063377815\&doi=10.1002\%2fsmr.2149\&partnerID=40\&md5=f8f11d4ac305f27600a3f8dec5af170c}, author = {El Koutbi, S. and Idri, A. 
and Abran, A.} } @article {Hosni2019, title = {Evaluating filter fuzzy analogy homogenous ensembles for software development effort estimation}, journal = {Journal of Software: Evolution and Process}, volume = {31}, number = {2}, year = {2019}, note = {cited By 0}, doi = {10.1002/smr.2117}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85061585023\&doi=10.1002\%2fsmr.2117\&partnerID=40\&md5=72983b70d540ccb14da59885e96cc4ab}, author = {Hosni, M. and Idri, A. and Abran, A.} } @conference {Khaldi2019532, title = {Forecasting of weekly patient visits to emergency department: Real case study}, booktitle = {Procedia Computer Science}, volume = {148}, year = {2019}, note = {cited By 0}, pages = {532-541}, doi = {10.1016/j.procs.2019.01.026}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062671151\&doi=10.1016\%2fj.procs.2019.01.026\&partnerID=40\&md5=2d1d746e66e3334ab894f34b3566ecf8}, author = {Khaldi, R. and Afia, A.E. and Chiheb, R.} } @conference {Abdellatif2019, title = {Graph-Based Computing Resource Allocation for Mobile Blockchain}, booktitle = {Proceedings - 2018 International Conference on Wireless Networks and Mobile Communications, WINCOM 2018}, year = {2019}, note = {cited By 0}, doi = {10.1109/WINCOM.2018.8629599}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062817982\&doi=10.1109\%2fWINCOM.2018.8629599\&partnerID=40\&md5=64a9c993d336cb3ae9644691ba7d0db6}, author = {Abdellatif, K. and Abdelmouttalib, C.} } @article {Bouzbita2019153, title = {Hidden Markov Model classifier for the adaptive ACS-TSP pheromone parameters}, journal = {Studies in Computational Intelligence}, volume = {774}, year = {2019}, note = {cited By 0}, pages = {153-169}, doi = {10.1007/978-3-319-95104-1_10}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053070003\&doi=10.1007\%2f978-3-319-95104-1_10\&partnerID=40\&md5=a3b7e585281ec5fd0d0268e2187f8014}, author = {Bouzbita, S. and El Afia, A. and Faizi, R.} } @article {Biallaten20191327, title = {Hybrid method for automating generation of reticulated structures (lattice structures) in complex conception domains}, journal = {International Journal of Electrical and Computer Engineering}, volume = {9}, number = {2}, year = {2019}, note = {cited By 0}, pages = {1327-1334}, doi = {10.11591/ijece.v9i2.pp.1327-1334}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85059986939\&doi=10.11591\%2fijece.v9i2.pp.1327-1334\&partnerID=40\&md5=9e8baf15f27ff8bc5dd5843538b859cc}, author = {Biallaten, Z. and Chiheb, R. and El Afia, A.} } @article {Zarnoufi2019672, title = {Language identification for user generated content in social media}, journal = {Smart Innovation, Systems and Technologies}, volume = {111}, year = {2019}, note = {cited By 0}, pages = {672-678}, doi = {10.1007/978-3-030-03577-8_73}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85056449806\&doi=10.1007\%2f978-3-030-03577-8_73\&partnerID=40\&md5=0788a6d5259f75585d3c1d415a6c7300}, author = {Zarnoufi, R. and Jaafar, H. and Abik, M.} } @article {Bousqaoui2019301, title = {Machine learning applications in supply chains: Long short-term memory for demand forecasting}, journal = {Lecture Notes in Networks and Systems}, volume = {49}, year = {2019}, note = {cited By 1}, pages = {301-317}, doi = {10.1007/978-3-319-97719-5_19}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063274139\&doi=10.1007\%2f978-3-319-97719-5_19\&partnerID=40\&md5=ac9a051fef5a767873929baf29b00612}, author = {Bousqaoui, H. and Achchab, S. 
and Tikito, K.} } @conference {Abouaomar2019, title = {Matching-Game for User-Fog Assignment}, booktitle = {2018 IEEE Global Communications Conference, GLOBECOM 2018 - Proceedings}, year = {2019}, note = {cited By 0}, doi = {10.1109/GLOCOM.2018.8647545}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063543941\&doi=10.1109\%2fGLOCOM.2018.8647545\&partnerID=40\&md5=2e86072aa15d3175de06d25bff61c79f}, author = {Abouaomar, A. and Kobbane, A. and Cherkaoui, S.} } @article {ElAmrani2019109, title = {Multi-capacitated Location Problem: A New Resolution Method Combining Exact and Heuristic Approaches Based on Set Partitioning}, journal = {Studies in Computational Intelligence}, volume = {774}, year = {2019}, note = {cited By 0}, pages = {109-121}, doi = {10.1007/978-3-319-95104-1_7}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053069722\&doi=10.1007\%2f978-3-319-95104-1_7\&partnerID=40\&md5=08ad9adabe61d8494d5b09f76263829e}, author = {El Amrani, M. and Benadada, Y. and Gendron, B.} } @article {Assami201921, title = {Ontology-based modeling for a personalized mooc recommender system}, journal = {Smart Innovation, Systems and Technologies}, volume = {111}, year = {2019}, note = {cited By 0}, pages = {21-28}, doi = {10.1007/978-3-030-03577-8_3}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85056482148\&doi=10.1007\%2f978-3-030-03577-8_3\&partnerID=40\&md5=38ab9ecb20bd621f80b6f281454278f1}, author = {Assami, S. and Daoudi, N. and Ajhoun, R.} } @article {ElAfia2019337, title = {Runtime prediction of optimizers using improved support vector machine}, journal = {Lecture Notes in Networks and Systems}, volume = {49}, year = {2019}, note = {cited By 0}, pages = {337-350}, doi = {10.1007/978-3-319-97719-5_21}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063278208\&doi=10.1007\%2f978-3-319-97719-5_21\&partnerID=40\&md5=3326fe51db036a4ca8af1da02286aba7}, author = {El Afia, A. and Sarhani, M.} } @conference {Illi2019, title = {On the Secrecy Performance of Mixed RF/UOW Communication System}, booktitle = {2018 IEEE Globecom Workshops, GC Wkshps 2018 - Proceedings}, year = {2019}, note = {cited By 1}, doi = {10.1109/GLOCOMW.2018.8644314}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85055381999\&doi=10.1109\%2fGLOCOMW.2018.8644314\&partnerID=40\&md5=fc4e23b9414d6feaac033c613bdfb08f}, author = {Illi, E. and Bouanani, F.E. and Da Costa, D.B. and Ayoub, F. and Dias, U.S.} } @conference {Afia2019512, title = {A Self Controlled Simulated Annealing Algorithm using Hidden Markov Model State Classification}, booktitle = {Procedia Computer Science}, volume = {148}, year = {2019}, note = {cited By 0}, pages = {512-521}, doi = {10.1016/j.procs.2019.01.024}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062665945\&doi=10.1016\%2fj.procs.2019.01.024\&partnerID=40\&md5=7a6b9cd9dfff11052107df70b4b828f0}, author = {Afia, A.E. and Lalaoui, M. and Chiheb, R.} } @article {Chabibi201917, title = {SimulML: A DSML for simulating SysML models}, journal = {Journal of Computing Science and Engineering}, volume = {13}, number = {1}, year = {2019}, note = {cited By 0}, pages = {17-31}, doi = {10.5626/JCSE.2019.13.1.17}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064554918\&doi=10.5626\%2fJCSE.2019.13.1.17\&partnerID=40\&md5=9335bd1dbb7ad1c561bf7e9b03c66c37}, author = {Chabibi, B. and Nassar, M. 
and Anwar, A.} } @article {Nassif2019, title = {Software development effort estimation using regression fuzzy models}, journal = {Computational Intelligence and Neuroscience}, volume = {2019}, year = {2019}, note = {cited By 0}, doi = {10.1155/2019/8367214}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062599935\&doi=10.1155\%2f2019\%2f8367214\&partnerID=40\&md5=efd8130615f3d9857ff368174c7a2db1}, author = {Nassif, A.B. and Azzeh, M. and Idri, A. and Abran, A.} } @article {Almorabeti2018508, title = {2X2 circular polarized antenna array for RF energy harvesting system to a UAV}, journal = {International Journal of Microwave and Optical Technology}, volume = {13}, number = {6}, year = {2018}, pages = {508-516}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85057379019\&partnerID=40\&md5=9520b64bbb9f545c406e032984728a7c}, author = {Almorabeti, S. and Rifi, M. and Terchoune, H. and Tizyi, H. and Afyf, A. and Latrach, M.} } @conference {Assila2018213, title = {Achieving low-energy consumption in fog computing environment: A matching game approach}, booktitle = {19th IEEE Mediterranean Eletrotechnical Conference, MELECON 2018 - Proceedings}, year = {2018}, pages = {213-218}, doi = {10.1109/MELCON.2018.8379096}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050157229\&doi=10.1109\%2fMELCON.2018.8379096\&partnerID=40\&md5=0250d5d03e5322d270460ae3af21ca94}, author = {Assila, B. and Kobbane, A. and Walid, A. and El Koutbi, M.} } @conference {Boulmrharj201856, title = {Approach for dimensioning stand-alone photovoltaic systems}, booktitle = {Energy Procedia}, volume = {153}, year = {2018}, pages = {56-61}, doi = {10.1016/j.egypro.2018.10.058}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85057433465\&doi=10.1016\%2fj.egypro.2018.10.058\&partnerID=40\&md5=b03f45460b45700ebfe92eb2f1b70e53}, author = {Boulmrharj, S. and NaitMalek, Y. and Elmouatamid, A. and Bakhouya, M. and Ouladsine, R. and Zine-Dine, K. and Khaidar, M. and Abid, R.} } @conference {Moumen2018, title = {Arabic diacritization with gated recurrent unit}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230931}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484931\&doi=10.1145\%2f3230905.3230931\&partnerID=40\&md5=3a401539dc4a1f24a5a97d1b3fdc8e36}, author = {Moumen, R. and Chiheb, R. and Afia, A.E. and Faizi, R.} } @conference {Benouda20181, title = {Automatic code generation within MDA approach for cross-platform mobiles apps}, booktitle = {Proceedings of EDIS 2017 - 1st International Conference on Embedded and Distributed Systems}, volume = {2017-December}, year = {2018}, pages = {1-5}, doi = {10.1109/EDIS.2017.8284045}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050342787\&doi=10.1109\%2fEDIS.2017.8284045\&partnerID=40\&md5=a91e72b83d0260e52818eee5dcb6c6fe}, author = {Benouda, H. and Azizi, M. and Moussaoui, M. and Esbai, R.} } @conference {Labghough20181, title = {Bit error probability analysis for majority logic decoding of CSOC codes over fading channels}, booktitle = {Proceedings - 2018 International Conference on Advanced Communication Technologies and Networking, CommNet 2018}, year = {2018}, pages = {1-6}, doi = {10.1109/COMMNET.2018.8360256}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048329058\&doi=10.1109\%2fCOMMNET.2018.8360256\&partnerID=40\&md5=cd402424adfc89cc9d288cf2d4c17684}, author = {Labghough, S. and Ayoub, F. 
and Belkasmi, M.} } @conference {Assila20181193, title = {Caching as a Service for 5G Networks: A Matching Game Approach for CaaS Resource Allocation}, booktitle = {Proceedings - IEEE Symposium on Computers and Communications}, volume = {2018-June}, year = {2018}, pages = {1193-1198}, doi = {10.1109/ISCC.2018.8538661}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85059217607\&doi=10.1109\%2fISCC.2018.8538661\&partnerID=40\&md5=d33d4f8d2f2cd79a86990bb8aeeaed3a}, author = {Assila, B. and Kobbane, A. and Ben-Othman, J. and Koutbi, M.E.} } @article {Labghough2018200, title = {A closed form expression for the bit error probability for majority logic decoding of CSOC codes over ΓΓ channels}, journal = {Advances in Intelligent Systems and Computing}, volume = {735}, year = {2018}, pages = {200-210}, doi = {10.1007/978-3-319-76354-5_18}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85044029464\&doi=10.1007\%2f978-3-319-76354-5_18\&partnerID=40\&md5=ccb2b721b8e8c6619ba59f4805aaab19}, author = {Labghough, S. and Ayoub, F. and Belkasmi, M.} } @conference {Touhafi20181, title = {CloudLab-weaver: An innovative approach to interconnect distributed cloud based online labs}, booktitle = {Proceedings of 2017 International Conference of Cloud Computing Technologies and Applications, CloudTech 2017}, volume = {2018-January}, year = {2018}, pages = {1-7}, doi = {10.1109/CloudTech.2017.8284739}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046685180\&doi=10.1109\%2fCloudTech.2017.8284739\&partnerID=40\&md5=f23d16e0a04a343a36ac33365447f357}, author = {Touhafi, A. and Braeken, A. and Alla, S.B. and Zbakh, M.} } @article {Sahnoun201826, title = {A coalition-formation game model for energy-efficient routing in mobile Ad-hoc network}, journal = {International Journal of Electrical and Computer Engineering}, volume = {8}, number = {1}, year = {2018}, note = {cited By 0}, pages = {26-33}, abstract = {One of the main routing problems in Mobile Ad-hoc Networks is the node{\textquoteright}s selfishness. Nodes are generally selfish and try to maximize their own benefit; hence these nodes refuse to forward packets on behalf of others to preserve their limited energy resources. This selfishness may lead to a low efficiency of routing. Therefore, it is important to study mechanisms that can be used to encourage cooperation among nodes in order to maintain network efficiency. In this paper, we propose a cooperative game-theoretic model to support more energy-aware and available-bandwidth-aware routing in MANETs. We introduce a novel framework from coalition-formation game theory, called the hedonic coalition-formation game. We integrate this model into the OLSR protocol, which is an optimization of the classical link state protocol for MANETs. Within each coalition, a coalition coordinator acts as a special MPR node to improve the energy efficiency and the packet success rate of the transmission. 
Simulation results show how the proposed algorithm improves performance in terms of the percentage of selected MPR nodes in the network, the percentage of alive nodes over time, and the packet delivery ratio, which proves that our proposed model leads to better results compared to the classical OLSR. {\textcopyright} 2018 Institute of Advanced Engineering and Science.}, doi = {10.11591/ijece.v8i1.pp26-33}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85042920602\&doi=10.11591\%2fijece.v8i1.pp26-33\&partnerID=40\&md5=b4b2c8c95ff6ea605c6d376020831a06}, author = {Sahnoun, A. and Habbani, A. and El Abbadi, J.} } @article {Aylaj2018982, title = {Construction of regular quasi cyclic-low density parity check codes from cyclic codes}, journal = {ARPN Journal of Engineering and Applied Sciences}, volume = {13}, number = {3}, year = {2018}, note = {cited By 0}, pages = {982-989}, abstract = {Low Density Parity Check (LDPC) codes are a class of linear error-correcting codes which have shown the ability to approach or even reach the capacity of the transmission channel. This class of codes approaches the fundamental limit of information theory asymptotically, more closely than Turbo Convolutional codes. It is ideal for long-distance satellite transmission and mobile communications, and it is also used in storage systems. In this paper, a new method for constructing quasi-cyclic low density parity-check (QC-LDPC) codes derived from cyclic codes is presented. The proposed method reduces the incidence vectors by eliminating the conjugate rows in the parity-check matrix of the derived cyclic code to construct circulant shifting sub-matrices. In the end, this method produces a large class of regular LDPC codes of quasi-cyclic structure having very low density, high coding rates, and Tanner graphs with no short cycles and a girth of at least 6. Performance results from computer simulations are also shown in this work for some constructed codes. {\textcopyright} 2006-2018 Asian Research Publishing Network (ARPN).}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85042232450\&partnerID=40\&md5=28e1cbb2eff93cd7408cd2458e5dd3b4}, author = {Aylaj, B. and Belkasmi, M. and Nouh, S. and Zouaki, H.} } @conference {Assila20181345, title = {A Cournot Economic Pricing Model for Caching Resource Management in 5G Wireless Networks}, booktitle = {2018 14th International Wireless Communications and Mobile Computing Conference, IWCMC 2018}, year = {2018}, pages = {1345-1350}, doi = {10.1109/IWCMC.2018.8450538}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053888603\&doi=10.1109\%2fIWCMC.2018.8450538\&partnerID=40\&md5=7f9917ffc8d0c6f0be1c40a7bb027e30}, author = {Assila, B. and Kobbane, A. 
and El Koutbi, M.} } @article {Haddad201812, title = {Design of high gain novel dielectric resonator antenna array for 24 GHz short range radar systems}, journal = {Advanced Electromagnetics}, volume = {7}, number = {4}, year = {2018}, pages = {12-18}, doi = {10.7716/aem.v7i4.874}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85052828658\&doi=10.7716\%2faem.v7i4.874\&partnerID=40\&md5=f03b09d177b785f22ce0d744b8db6f72}, author = {Haddad, A. and Aoutoul, M. and Rais, K. and Essaaidi, M. and Faqir, M. and Bouya, M.} } @conference {Maleky2018428, title = {Design of simple printed Dipole antenna on flexible substrate for UHF band}, booktitle = {Procedia Manufacturing}, volume = {22}, year = {2018}, pages = {428-435}, doi = {10.1016/j.promfg.2018.03.067}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049256549\&doi=10.1016\%2fj.promfg.2018.03.067\&partnerID=40\&md5=b2c8132bde575104181fef44b8a91d6d}, author = {Maleky, O.E. and Abdelouahab, F.B. and Essaaidi, M. and Ennasar, M.A.} } @article {Illi201855345, title = {Dual-hop mixed RF-UOW communication system: A PHY Security Analysis}, journal = {IEEE Access}, volume = {6}, year = {2018}, pages = {55345-55360}, doi = {10.1109/ACCESS.2018.2870344}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85054276852\&doi=10.1109\%2fACCESS.2018.2870344\&partnerID=40\&md5=3402dea4c38b8cc6d781800616c3f6e5}, author = {Illi, E. and El Bouanani, F. and Da Costa, D.B. and Ayoub, F. and Dias, U.S.} } @article {Avanzini2018379, title = {Evaluating perceived quality through sensory evaluation in the development process of new products: A case study of luxury market}, journal = {Advances in Intelligent Systems and Computing}, volume = {739}, year = {2018}, pages = {379-388}, doi = {10.1007/978-981-10-8612-0_40}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85044184043\&doi=10.1007\%2f978-981-10-8612-0_40\&partnerID=40\&md5=e0c2647850f1e2194831321ca22d7664}, author = {Avanzini, C. and Mantelet, F. and Aoussat, A. and Jeanningros, F. and Bouchard, C.} } @article {Idri2018, title = {Evaluating Pred(p) and standardized accuracy criteria in software development effort estimation}, journal = {Journal of Software: Evolution and Process}, volume = {30}, number = {4}, year = {2018}, doi = {10.1002/smr.1925}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045686530\&doi=10.1002\%2fsmr.1925\&partnerID=40\&md5=3a3d72a1a9b32d7a365db2168b5e3a45}, author = {Idri, A. and Abnane, I. and Abran, A.} } @article {Moumen2018360, title = {Evaluation of gated recurrent unit in Arabic diacritization}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {9}, number = {11}, year = {2018}, pages = {360-364}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85059029808\&partnerID=40\&md5=8ec6347667792a079047e6121cfd5999}, author = {Moumen, R. and Chiheb, R. and Faizi, R. and El Afia, A.} } @conference {Faizi20182109, title = {Exploring the potentials of big data analytics in marketing}, booktitle = {Proceedings of the 31st International Business Information Management Association Conference, IBIMA 2018: Innovation Management and Education Excellence through Vision 2020}, year = {2018}, pages = {2109-2114}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060828578\&partnerID=40\&md5=f6ed0328b04bb583011318fc69ad0c58}, author = {Faizi, R. and Fkihi, S.E. 
and Afia, A.E.} } @article {Sarhani2018447, title = {Facing the feature selection problem with a binary PSO-GSA approach}, journal = {Operations Research/ Computer Science Interfaces Series}, volume = {62}, year = {2018}, note = {cited By 0}, pages = {447-462}, abstract = {Feature selection has become the focus of much research in many areas where we can face the problem of big data or complex relationships among features. Metaheuristics have gained much attention in solving many practical problems, including feature selection. Our contribution in this paper is to propose a binary hybrid metaheuristic to minimize a fitness function representing a trade-off between the classification error of selecting the feature subset and the corresponding number of features. This algorithm combines particle swarm optimization (PSO) and the gravitational search algorithm (GSA). Also, a mutation operator is integrated to enhance population diversity. Experimental results on ten benchmark datasets show that our proposed hybrid method for feature selection can achieve high performance when compared with other metaheuristic algorithms and well-known feature selection approaches. {\textcopyright} Springer International Publishing AG 2018.}, doi = {10.1007/978-3-319-58253-5_26}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032615163\&doi=10.1007\%2f978-3-319-58253-5_26\&partnerID=40\&md5=45163b8637f55b8af04f902f4f33afc2}, author = {Sarhani, M. and El Afia, A. and Faizi, R.} } @conference {Khaldi2018, title = {Feedforward and recurrent neural networks for time series forecasting: Comparative study}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230946}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053485247\&doi=10.1145\%2f3230905.3230946\&partnerID=40\&md5=6fb59f2f8ddf603c98e96d68b1a47ee4}, author = {Khaldi, R. and Chiheb, R. and Afia, A.E.} } @article {Elmouhtadi2018435, title = {Fingerprint identification based on hierarchical triangulation}, journal = {Journal of Information Processing Systems}, volume = {14}, number = {2}, year = {2018}, pages = {435-447}, doi = {10.3745/JIPS.02.0084}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046883308\&doi=10.3745\%2fJIPS.02.0084\&partnerID=40\&md5=98878a4e67240c4ddadb744f90dab887}, author = {Elmouhtadi, M. and El Fkihi, S. and Aboutajdine, D.} }
@article {Elmouhtadi2018393, title = {Fingerprint identification using hierarchical matching and topological structures}, journal = {Studies in Computational Intelligence}, volume = {730}, year = {2018}, note = {cited By 0}, pages = {393-408}, abstract = {Fingerprint identification is one of the most popular and efficient biometric techniques used for improving automatic personal identification. In this paper, we present a new indexing method based on the estimation of the singular point, considered an important feature of the fingerprint, using the directional field. A hierarchical Delaunay triangulation is then applied to the minutiae around the extracted singular point. The two fingerprints are compared by introducing the barycenter notion to ensure the exact location of the similar triangles. We have performed extensive experiments and comparisons to demonstrate the effectiveness of the proposed approach using a challenging public database (i.e., FVC2000), which contains small-area and low-quality fingerprints. {\textcopyright} Springer International Publishing AG 2018.}, doi = {10.1007/978-3-319-63754-9_18}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032002757\&doi=10.1007\%2f978-3-319-63754-9_18\&partnerID=40\&md5=e4d45320fd26c5982556a97c07bc754d}, author = {Elmouhtadi, M. and El Fkihi, S. and Aboutajdine, D.} } @conference {Khaldi2018, title = {Forecasting of bitcoin daily returns with eemd-elman based model}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230948}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484904\&doi=10.1145\%2f3230905.3230948\&partnerID=40\&md5=0f2e4191d42a3eaf7857e8a6821b5f27}, author = {Khaldi, R. and Afia, A.E. and Chiheb, R. and Faizi, R.} } @conference {Mohajir2018, title = {Foreword}, booktitle = {19th IEEE Mediterranean Eletrotechnical Conference, MELECON 2018 - Proceedings}, year = {2018}, doi = {10.1109/MELCON.2018.8379052}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050116113\&doi=10.1109\%2fMELCON.2018.8379052\&partnerID=40\&md5=5d16db59f8264ed3e9ce60f54b800e6a}, author = {Mohajir, M.E. and Essaaidi, M.M. and Assem, N. and Benjillali, M. and Aniba, G. and Belmekki, A. and Issati, O.E.} } @article {Amraoui20186945, title = {A Forwarding Game Approach for Reducing Topology Control Traffic in MANETs}, journal = {Arabian Journal for Science and Engineering}, volume = {43}, number = {12}, year = {2018}, pages = {6945-6961}, doi = {10.1007/s13369-017-2910-7}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85056214456\&doi=10.1007\%2fs13369-017-2910-7\&partnerID=40\&md5=1dfa43ead71bc316da85208bc3a3c893}, author = {Amraoui, H. and Habbani, A. and Hajami, A.} } @conference {Sarhani2018, title = {Generalization enhancement of support vector regression in electric load forecasting with model selection}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230947}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484922\&doi=10.1145\%2f3230905.3230947\&partnerID=40\&md5=a29a4cececf76f665c91e09b16ba8692}, author = {Sarhani, M. 
and Afia, A.E.} } @conference {Illi20181, title = {A high accuracy solver for RTE in underwater optical communication path loss prediction}, booktitle = {Proceedings - 2018 International Conference on Advanced Communication Technologies and Networking, CommNet 2018}, year = {2018}, pages = {1-8}, doi = {10.1109/COMMNET.2018.8360253}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048315000\&doi=10.1109\%2fCOMMNET.2018.8360253\&partnerID=40\&md5=12749d7368596843ee644e3a2d679eec}, author = {Illi, E. and El Bouanani, F. and Ayoub, F.} } @conference {Houari20181, title = {Hybrid big data warehouse for on-demand decision needs}, booktitle = {Proceedings of 2017 International Conference on Electrical and Information Technologies, ICEIT 2017}, volume = {2018-January}, year = {2018}, pages = {1-6}, doi = {10.1109/EITech.2017.8255261}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047477758\&doi=10.1109\%2fEITech.2017.8255261\&partnerID=40\&md5=f88360d6e054f43cc33c4593431fa891}, author = {Houari, M.E. and Rhanoui, M. and Asri, B.E.} } @conference {Abnane20181015, title = {Improved analogy-based effort estimation with incomplete mixed data}, booktitle = {Proceedings of the 2018 Federated Conference on Computer Science and Information Systems, FedCSIS 2018}, year = {2018}, pages = {1015-1024}, doi = {10.15439/2018F95}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85057257109\&doi=10.15439\%2f2018F95\&partnerID=40\&md5=04ce7873db15a5eed3fd50a320b73e4f}, author = {Abnane, I. and Idri, A.} } @article {Bousqaoui2018626, title = {Information sharing as a coordination tool in supply chain using multi-agent system and neural networks}, journal = {Advances in Intelligent Systems and Computing}, volume = {745}, year = {2018}, pages = {626-632}, doi = {10.1007/978-3-319-77703-0_62}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045145521\&doi=10.1007\%2f978-3-319-77703-0_62\&partnerID=40\&md5=3a6c9dcdd6ba98d43e615abe474572b2}, author = {Bousqaoui, H. and Slimani, I. and Achchab, S.} } @article {Landrigan2018462, title = {The Lancet Commission on pollution and health}, journal = {The Lancet}, volume = {391}, number = {10119}, year = {2018}, pages = {462-512}, doi = {10.1016/S0140-6736(17)32345-0}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85031763844\&doi=10.1016\%2fS0140-6736\%2817\%2932345-0\&partnerID=40\&md5=a37689cb0e79594159ee6f8afc55fae1}, author = {Landrigan, P.J. and Fuller, R. and Acosta, N.J.R. and Adeyi, O. and Arnold, R. and Basu, N.N. and Bald{\'e}, A.B. and Bertollini, R. and Bose-O{\textquoteright}Reilly, S. and Boufford, J.I. and Breysse, P.N. and Chiles, T. and Mahidol, C. and Coll-Seck, A.M. and Cropper, M.L. and Fobil, J. and Fuster, V. and Greenstone, M. and Haines, A. and Hanrahan, D. and Hunter, D. and Khare, M. and Krupnick, A. and Lanphear, B. and Lohani, B. and Martin, K. and Mathiasen, K.V. and McTeer, M.A. and Murray, C.J.L. and Ndahimananjara, J.D. and Perera, F. and Poto{\v c}nik, J. and Preker, A.S. and Ramesh, J. and Rockstr{\"o}m, J. and Salinas, C. and Samson, L.D. and Sandilya, K. and Sly, P.D. and Smith, K.R. and Steiner, A. and Stewart, R.B. and Suk, W.A. and van Schayck, O.C.P. and Yadama, G.N. and Yumkella, K. 
and Zhong, M.} } @conference {Bousqaoui20181, title = {Machine learning applications in supply chains: An emphasis on neural network applications}, booktitle = {Proceedings of 2017 International Conference of Cloud Computing Technologies and Applications, CloudTech 2017}, volume = {2018-January}, year = {2018}, pages = {1-7}, doi = {10.1109/CloudTech.2017.8284722}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046694532\&doi=10.1109\%2fCloudTech.2017.8284722\&partnerID=40\&md5=606fee01fc33846e4a9726b198e91983}, author = {Bousqaoui, H. and Achchab, S. and Tikito, K.} } @conference {Assila20181, title = {A Many-To-One Matching Game Approach to Achieve Low-Latency Exploiting Fogs and Caching}, booktitle = {2018 9th IFIP International Conference on New Technologies, Mobility and Security, NTMS 2018 - Proceedings}, volume = {2018-January}, year = {2018}, pages = {1-2}, doi = {10.1109/NTMS.2018.8328671}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051102582\&doi=10.1109\%2fNTMS.2018.8328671\&partnerID=40\&md5=93f51b2e077f7f00d3d0e5e5a8123950}, author = {Assila, B. and Kobbane, A. and El Koutbi, M.} } @article {Cheikhi201836, title = {Measurement based E-government portals{\textquoteright} benchmarking framework: Architectural and procedural views}, journal = {Advances in Intelligent Systems and Computing}, volume = {746}, year = {2018}, pages = {36-45}, doi = {10.1007/978-3-319-77712-2_4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045330135\&doi=10.1007\%2f978-3-319-77712-2_4\&partnerID=40\&md5=941334f001bfb6ab10e687f93e3f62a7}, author = {Cheikhi, L. and Fath-Allah, A. and Idri, A. and Al-Qutaish, R.E.} } @conference {Kabbaj2018, title = {Model selection for learning branch-and-cut strategies}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230908}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484527\&doi=10.1145\%2f3230905.3230908\&partnerID=40\&md5=1245860bc8014f28cf4be5f9a62a81ef}, author = {Kabbaj, M.M. and El Afia, A.} } @conference {Kharchouf2018, title = {Modeling and PI Control Strategy of DFIG Based Wind Energy Conversion Systems}, booktitle = {Proceedings of 2017 International Renewable and Sustainable Energy Conference, IRSEC 2017}, year = {2018}, doi = {10.1109/IRSEC.2017.8477348}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85055863270\&doi=10.1109\%2fIRSEC.2017.8477348\&partnerID=40\&md5=32eba764430e8a533e635724d68c48b5}, author = {Kharchouf, I. and Essadki, A. and Arbaoui, M. and Nasser, T.} } @conference {Zarra201888, title = {MOOCs{\textquoteright} recommendation based on forum latent dirichlet allocation}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, pages = {88-93}, doi = {10.1145/3289100.3289115}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85058674172\&doi=10.1145\%2f3289100.3289115\&partnerID=40\&md5=1de27cf008154c73414e9740b9f86e22}, author = {Zarra, T. and Chiheb, R. and Faizi, R. and Afia, A.E.} } @article {Abdellaoui2018180, title = {Multipoint relay selection based on stability of spatial relation in mobile Ad hoc Networks}, journal = {International Journal of Communication Networks and Information Security}, volume = {10}, number = {1}, year = {2018}, pages = {180-187}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046012680\&partnerID=40\&md5=06e3579432a53adb08f8fbb99dd97217}, author = {Abdellaoui, A. and Elmhamdi, J. 
and Berradi, H.} } @conference {Abdellaoui20181, title = {Multipoint relay selection through estimated spatial relation in smart city environments}, booktitle = {Proceedings - 2018 International Conference on Advanced Communication Technologies and Networking, CommNet 2018}, year = {2018}, pages = {1-10}, doi = {10.1109/COMMNET.2018.8360273}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048348509\&doi=10.1109\%2fCOMMNET.2018.8360273\&partnerID=40\&md5=c1c76aa71528dc64ced3f5bba065ce46}, author = {Abdellaoui, A. and Elmhamdi, J. and Berradi, H.} } @article {Abdellaoui20182718, title = {Multipoint relays selection through stability of estimated spatial relation in mobile ad hoc networks}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {96}, number = {9}, year = {2018}, pages = {2718-2728}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047451405\&partnerID=40\&md5=019296f34f9088bb6f30b2ce2f6dfd11}, author = {Abdellaoui, A. and Elmhamdi, J. and Berradi, H.} } @article {ElAmrani2018190, title = {Multi-stage algorithms for solving a generalized capacitated p-median location problem}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {9}, number = {5}, year = {2018}, pages = {190-196}, doi = {10.14569/IJACSA.2018.090524}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049513825\&doi=10.14569\%2fIJACSA.2018.090524\&partnerID=40\&md5=738659b2530475fe01dab51c8b93c16c}, author = {El Amrani, M. and Benadada, Y.} } @conference {Arbaoui2018, title = {A New Robust Control by Active Disturbance Rejection Control Applied on Wind Turbine System Based on Doubly Fed Induction Generator DFIG}, booktitle = {Proceedings of 2017 International Renewable and Sustainable Energy Conference, IRSEC 2017}, year = {2018}, doi = {10.1109/IRSEC.2017.8477245}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85055879902\&doi=10.1109\%2fIRSEC.2017.8477245\&partnerID=40\&md5=6afbcf7b5f6cadf726b9ad8f5f892989}, author = {Arbaoui, M. and Essadki, A. and Kharchouf, I. and Nasser, T.} } @conference {Amine2018, title = {New user association scheme based on multi-objective optimization for 5G ultra-dense multi-RAT HetNets}, booktitle = {IEEE International Conference on Communications}, volume = {2018-May}, year = {2018}, doi = {10.1109/ICC.2018.8422154}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051424239\&doi=10.1109\%2fICC.2018.8422154\&partnerID=40\&md5=3572d774ac502ed9eb6c557554de3420}, author = {Amine, M. and Walid, A. and Kobbane, A. and Ben-Othman, J.} } @conference {Moumen20181, title = {An NLP based text-to-speech synthesizer for Moroccan Arabic}, booktitle = {Proceedings of 2017 International Conference of Cloud Computing Technologies and Applications, CloudTech 2017}, volume = {2018-January}, year = {2018}, pages = {1-5}, doi = {10.1109/CloudTech.2017.8284745}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046676982\&doi=10.1109\%2fCloudTech.2017.8284745\&partnerID=40\&md5=9ca745be5c63a00cda019ef8b0230a9d}, author = {Moumen, R. and Chiheb, R. and Faizi, R. 
and Afia, A.E.} } @article {Asaad201825, title = {NoSQL databases {\textendash} seek for a design methodology}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {11163 LNCS}, year = {2018}, pages = {25-40}, doi = {10.1007/978-3-030-00856-7_2}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85055801711\&doi=10.1007\%2f978-3-030-00856-7_2\&partnerID=40\&md5=7dea46ac42aaa6503942cec88b481ca6}, author = {Asaad, C. and Baina, K.} } @conference {Tahiri20181, title = {A novel detector based on the compact genetic algorithm for MIMO systems}, booktitle = {Proceedings - 2018 International Conference on Advanced Communication Technologies and Networking, CommNet 2018}, year = {2018}, pages = {1-6}, doi = {10.1109/COMMNET.2018.8360279}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048346101\&doi=10.1109\%2fCOMMNET.2018.8360279\&partnerID=40\&md5=80e5367fed32603ca01e87c38d0b8d75}, author = {Tahiri, N. and Azouaoui, A. and Belkasmi, M.} } @conference {Admi2018, title = {A novel mser based method for detecting text in license plates}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230923}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484803\&doi=10.1145\%2f3230905.3230923\&partnerID=40\&md5=e4a83cc458bcdf11fbe9b492ea53bb87}, author = {Admi, M. and Fkihi, S.E. and Faizi, R.} } @conference {Abouyahya2018, title = {An optimization of the k-nearest neighbor using dynamic time warping as a measurement similarity for facial expressions recognition}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230921}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484708\&doi=10.1145\%2f3230905.3230921\&partnerID=40\&md5=2651d0ca787e887d19fd3193ed428623}, author = {Abouyahya, A. and Fkihi, S.E.} } @conference {Nouinou2018, title = {Overview on last advances of feature selection}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230959}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484371\&doi=10.1145\%2f3230905.3230959\&partnerID=40\&md5=e7c10b73c82aca9d8cd0e61aa6e097f4}, author = {Nouinou, S. and Afia, A.E. and Fkihi, S.E.} } @conference {Bouzbita2018, title = {Parameter adaptation for ant colony system algorithm using hidden markov model for tsp problems}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230962}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053485206\&doi=10.1145\%2f3230905.3230962\&partnerID=40\&md5=69e09fa2debc5dc07106895d9ce7948b}, author = {Bouzbita, S. and El Afia, A. and Faizi, R.} } @conference {Illi2018112, title = {On the performance of mixed FSO-UOWC communication system with the presence of pointing error}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, pages = {112-118}, doi = {10.1145/3289100.3289119}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85058670470\&doi=10.1145\%2f3289100.3289119\&partnerID=40\&md5=fd9e1ae8efcae85f32f61992bed4ad8d}, author = {Illi, E. and Bouanani, F.E. 
and Ayoub, F.} } @conference {ElAfia20181, title = {Performance prediction using support vector machine for the configuration of optimization algorithms}, booktitle = {Proceedings of 2017 International Conference of Cloud Computing Technologies and Applications, CloudTech 2017}, volume = {2018-January}, year = {2018}, pages = {1-7}, doi = {10.1109/CloudTech.2017.8284699}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046624408\&doi=10.1109\%2fCloudTech.2017.8284699\&partnerID=40\&md5=13c42fe4df8b128f5a87988605c2485f}, author = {El Afia, A. and Sarhani, M.} } @conference {Assami20181265, title = {Personalization criteria for enhancing learner engagement in MOOC platforms}, booktitle = {IEEE Global Engineering Education Conference, EDUCON}, volume = {2018-April}, year = {2018}, pages = {1265-1272}, doi = {10.1109/EDUCON.2018.8363375}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048130321\&doi=10.1109\%2fEDUCON.2018.8363375\&partnerID=40\&md5=9cfaeac7fd7536644d73cb99c9cbaefc}, author = {Assami, S. and Daoudi, N. and Ajhoun, R.} } @conference {Amraoui20181, title = {Reducing network topology over smart digital mobile environment using linear programming approach}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, pages = {1-8}, doi = {10.1145/3289100.3289101}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85058617163\&doi=10.1145\%2f3289100.3289101\&partnerID=40\&md5=2611403282d988e759c8721f91c739e5}, author = {Amraoui, H. and Habbani, A. and Hajami, A.} } @conference {Kerzazi2018, title = {Release engineering: From structural to functional view}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3289402.3289547}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062787123\&doi=10.1145\%2f3289402.3289547\&partnerID=40\&md5=9cbe63a1729a1e49aae60abf735872b1}, author = {Kerzazi, N. and El Asri, I.} } @conference {Amraoui20181, title = {Security \& cooperation mechanisms over mobile ad hoc networks: A survey and challenges}, booktitle = {Proceedings of 2017 International Conference on Electrical and Information Technologies, ICEIT 2017}, volume = {2018-January}, year = {2018}, pages = {1-6}, doi = {10.1109/EITech.2017.8255313}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047364198\&doi=10.1109\%2fEITech.2017.8255313\&partnerID=40\&md5=16f5590d7460c7a1606e951c65b604b4}, author = {Amraoui, H. and Habbani, A. and Hajami, A. and Bilal, E.} } @conference {Lalaoui20181, title = {A self-adaptive very fast simulated annealing based on Hidden Markov model}, booktitle = {Proceedings of 2017 International Conference of Cloud Computing Technologies and Applications, CloudTech 2017}, volume = {2018-January}, year = {2018}, pages = {1-8}, doi = {10.1109/CloudTech.2017.8284698}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046690728\&doi=10.1109\%2fCloudTech.2017.8284698\&partnerID=40\&md5=019d60ddd6cdf20019791974755a6d33}, author = {Lalaoui, M. and El Afia, A. and Chiheb, R.} } @article {Lalaoui2018291, title = {A self-tuned simulated annealing algorithm using hidden markov model}, journal = {International Journal of Electrical and Computer Engineering}, volume = {8}, number = {1}, year = {2018}, pages = {291-298}, doi = {10.11591/ijece.v8i1.pp291-298}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85042923643\&doi=10.11591\%2fijece.v8i1.pp291-298\&partnerID=40\&md5=804e27501baca7ca4efeeaa3d1ff0ef1}, author = {Lalaoui, M. and El Afia, A. 
and Chiheb, R.} } @article {Lalaoui2018291, title = {A self-tuned simulated annealing algorithm using hidden markov model}, journal = {International Journal of Electrical and Computer Engineering}, volume = {8}, number = {1}, year = {2018}, note = {cited By 0}, pages = {291-298}, abstract = {Simulated Annealing algorithm (SA) is a well-known probabilistic heuristic. It mimics the annealing process in metallurgy to approximate the global minimum of an optimization problem. The SA has many parameters which need to be tuned manually when applied to a specific problem. The tuning may be difficult and time-consuming. This paper aims to overcome this difficulty by using a self-tuning approach based on a machine learning algorithm called Hidden Markov Model (HMM). The main idea is allowing the SA to adapt his own cooling law at each iteration, according to the search history. An experiment was performed on many benchmark functions to show the efficiency of this approach compared to the classical one. {\textcopyright} 2018 Institute of Advanced Engineering and Science.}, doi = {10.11591/ijece.v8i1.pp291-298}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85042923643\&doi=10.11591\%2fijece.v8i1.pp291-298\&partnerID=40\&md5=804e27501baca7ca4efeeaa3d1ff0ef1}, author = {Lalaoui, M. and El Afia, A. and Chiheb, R.} } @article {Niharmine20181174, title = {Simulated annealing decoder for linear block codes}, journal = {Journal of Computer Science}, volume = {14}, number = {8}, year = {2018}, pages = {1174-1189}, doi = {10.3844/jcssp.2018.1174.1189}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85052851664\&doi=10.3844\%2fjcssp.2018.1174.1189\&partnerID=40\&md5=fe2215ebe454f68d001cef85f5f26095}, author = {Niharmine, L. and Bouzkraoui, H. and Azouaoui, A. and Hadi, Y.} } @article {Bouzkraoui20183994, title = {Soft decision decoding of linear block codes using memetic algorithms}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {96}, number = {13}, year = {2018}, pages = {3994-4003}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050164403\&partnerID=40\&md5=a229b49e1f514877e8d3bf865c806ede}, author = {Bouzkraoui, H. and Azouaoui, A. and Hadi, Y. and Niharmine, L.} } @article {Sayagh2018, title = {Software Configuration Engineering in Practice: Interviews, Survey, and Systematic Literature Review}, journal = {IEEE Transactions on Software Engineering}, year = {2018}, doi = {10.1109/TSE.2018.2867847}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85052624554\&doi=10.1109\%2fTSE.2018.2867847\&partnerID=40\&md5=f1b2d51b59522e24e5c52f064a62d00b}, author = {Sayagh, M. and Kerzazi, N. and Adams, B. and Petrillo, F.} } @conference {ElAmrani20181, title = {Solving the multi-capacitated location problem using a Lagrangian relaxation approach}, booktitle = {Proceedings - GOL 2018: 4th IEEE International Conference on Logistics Operations Management}, year = {2018}, pages = {1-5}, doi = {10.1109/GOL.2018.8378083}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050006113\&doi=10.1109\%2fGOL.2018.8378083\&partnerID=40\&md5=a6806947bd9a072c9afab65cb312d06a}, author = {El Amrani, M. 
and Benadada, Y.} } @article {Angeler2018, title = {Sonifying social-ecological change: A wetland laments agricultural transformation}, journal = {Ecology and Society}, volume = {23}, number = {2}, year = {2018}, doi = {10.5751/ES-10055-230220}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049607952\&doi=10.5751\%2fES-10055-230220\&partnerID=40\&md5=d13c737c6c7af34594b114006ad1b2a5}, author = {Angeler, D.G. and Alvarez-Cobelas, M. and S{\'a}nchez-Carrillo, S.} } @conference {Abdellaoui2018, title = {Spatial relation expiration time to select multipoint relays in smart city environments}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3286606.3286829}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85059884352\&doi=10.1145\%2f3286606.3286829\&partnerID=40\&md5=061c6da5dd438054e59828b195e9a9c5}, author = {Abdellaoui, A. and Elmhamdi, J. and Berradi, H.} } @article {Idri2018, title = {Support vector regression-based imputation in analogy-based software development effort estimation}, journal = {Journal of Software: Evolution and Process}, volume = {30}, number = {12}, year = {2018}, doi = {10.1002/smr.2114}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85058493764\&doi=10.1002\%2fsmr.2114\&partnerID=40\&md5=09059bd4e9087fda5b7225f1c8aeea9d}, author = {Idri, A. and Abnane, I. and Abran, A.} } @conference {Niharmine20181, title = {Tifinagh handwritten character recognition using genetic algorithms}, booktitle = {Proceedings - 2018 International Conference on Advanced Communication Technologies and Networking, CommNet 2018}, year = {2018}, pages = {1-6}, doi = {10.1109/COMMNET.2018.8360267}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048318442\&doi=10.1109\%2fCOMMNET.2018.8360267\&partnerID=40\&md5=5073236df8a21a7f3797ced0a0d435ec}, author = {Niharmine, L. and Outtaj, B. and Azouaoui, A.} } @article {Aoun2018331, title = {Time-dependence in multi-agent MDP applied to gate assignment problem}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {9}, number = {2}, year = {2018}, pages = {331-340}, doi = {10.14569/IJACSA.2018.090247}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049525520\&doi=10.14569\%2fIJACSA.2018.090247\&partnerID=40\&md5=18412e008a36b0bc8220a03385704b3a}, author = {Aoun, O. and Afia, A.E.} } @conference {ElHaddaoui2018, title = {Toward a sentiment analysis framework for social media}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, doi = {10.1145/3230905.3230919}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053484908\&doi=10.1145\%2f3230905.3230919\&partnerID=40\&md5=c602c6dc79537942d4d5ac0b9e4a0197}, author = {El Haddaoui, B. and Chiheb, R. and Faizi, R. and Afia, A.E.} } @conference {Boulmrharj2018, title = {Towards a battery characterization methodology for performance evaluation of micro-grid systems}, booktitle = {2018 International Conference on Smart Energy Systems and Technologies, SEST 2018 - Proceedings}, year = {2018}, doi = {10.1109/SEST.2018.8495829}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85056492547\&doi=10.1109\%2fSEST.2018.8495829\&partnerID=40\&md5=f25a82d795aa6991c23f1064013cde45}, author = {Boulmrharj, S. and NaitMalek, Y. and Mouatamid, A.E. and Ouladsine, R. and Bakhouya, M. and Ouldmoussa, M. and Zine-Dine, K. and Khaidar, M. 
and Abid, R.} } @conference {Elmouatamid2018984, title = {Towards a Demand/Response Control Approach for Micro-grid Systems}, booktitle = {2018 5th International Conference on Control, Decision and Information Technologies, CoDIT 2018}, year = {2018}, pages = {984-988}, doi = {10.1109/CoDIT.2018.8394951}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050227456\&doi=10.1109\%2fCoDIT.2018.8394951\&partnerID=40\&md5=97a6b224a481de8a0d5d91a46b76b895}, author = {Elmouatamid, A. and NaitMalek, Y. and Ouladsine, R. and Bakhouya, M. and Elkamoun, N. and Zine-Dine, K. and Khaidar, M. and Abid, R.} } @conference {Bahadi201842, title = {Towards efficient big data: Hadoop data placing and processing}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, pages = {42-47}, doi = {10.1145/3289100.3289108}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85058656669\&doi=10.1145\%2f3289100.3289108\&partnerID=40\&md5=52e6fda5215333977a9413c6cb1ff547}, author = {Bahadi, J. and Asri, B.E. and Courtine, M. and Rhanoui, M. and Kergosien, Y.} } @conference {Abouaomar201814, title = {Users-Fogs association within a cache context in 5G networks:Coalition game model}, booktitle = {Proceedings - IEEE Symposium on Computers and Communications}, volume = {2018-June}, year = {2018}, pages = {14-19}, doi = {10.1109/ISCC.2018.8538500}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85059221795\&doi=10.1109\%2fISCC.2018.8538500\&partnerID=40\&md5=e324f3454d662cef58a9c78528c68a60}, author = {Abouaomar, A. and Elmachkour, M. and Kobbane, A. and Tembine, H. and Ayaida, M.} } @article {Hosni20185977, title = {On the value of parameter tuning in heterogeneous ensembles effort estimation}, journal = {Soft Computing}, volume = {22}, number = {18}, year = {2018}, pages = {5977-6010}, doi = {10.1007/s00500-017-2945-4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85035807309\&doi=10.1007\%2fs00500-017-2945-4\&partnerID=40\&md5=83d094f56b80a21295cf4230621f445d}, author = {Hosni, M. and Idri, A. and Abran, A. and Nassif, A.B.} } @conference {Mohammed201876, title = {The vocabulary and the morphology in spell checker}, booktitle = {Procedia Computer Science}, volume = {127}, year = {2018}, pages = {76-81}, doi = {10.1016/j.procs.2018.01.100}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045634856\&doi=10.1016\%2fj.procs.2018.01.100\&partnerID=40\&md5=451f8f1e196430fe5e79a3d99f624f82}, author = {Mohammed, N. and Abdellah, Y.} } @conference {Khelifa2017211, title = {An accurate HSMM-based system for Arabic phonemes recognition}, booktitle = {9th International Conference on Advanced Computational Intelligence, ICACI 2017}, year = {2017}, note = {cited By 1}, pages = {211-216}, abstract = {The majority of successful automatic speech recognition (ASR) systems utilize a probabilistic modeling of the speech signal via hidden Markov models (HMMs). In a standard HMM model, state duration probabilities decrease exponentially with time, which fails to satisfactorily describe the temporal structure of speech. Incorporating explicit state durational probability distribution functions (pdf) into the HMM is a famous solution to overcome this feebleness. This way is well-known as a hidden semi-Markov model (HSMM). Previous papers have confirmed that using HSMM models instead of the standard HMMs have enhanced the recognition accuracy in many targeted languages. 
This paper addresses an important stage of our on-going work which aims to construct an accurate Arabic recognizer for teaching and learning purposes. It presents an implementation of an HSMM model whose principal goal is improving the classical HMM{\textquoteright}s durational behavior. In this implementation, the Gaussian distribution is used for modeling state durations. Experiments have been carried out on a particular Arabic speech corpus collected from recitations of the Holy Quran. Results show an increase in recognition accuracy by around 1\%. We confirmed via these results that such a system outperforms the baseline HTK when the Gaussian distribution is integrated into the HTK{\textquoteright}s recognizer back-end. {\textcopyright} 2017 IEEE.}, doi = {10.1109/ICACI.2017.7974511}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85027464290\&doi=10.1109\%2fICACI.2017.7974511\&partnerID=40\&md5=169e067b67861858798f016d82e0e080}, author = {Khelifa, M.O.M. and Belkasmi, M. and Abdellah, Y. and Elhadj, Y.O.M.} } @conference {Khaldi2017, title = {Artificial neural network based approach for blood demand forecasting: Fez transfusion blood center case study}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {Blood demand and supply management are considered one of the major components of a healthcare supply chain, since blood is a vital element in preserving patient{\textquoteright}s life. However, forecasting it faces several challenges including frequent shortages, and possible expiration caused by demand uncertainty of hospitals. This uncertainty is mainly due to high variability in the number of emergency cases. Thereupon, this investigation presents a real case study of forecasting monthly demand of three blood components, using Artificial Neural Networks (ANNs). The demand of the three blood components (red blood cells (RBC), plasma (CP) and platelets (PFC)) and other observations are obtained from a central transfusion blood center and a University Hospital. Experiments are carried out using three networks to forecast each blood component separately. Last, the presented model is compared with ARIMA to evaluate its performance in prediction. The results of this study depict that ANN models overcome ARIMA models in demand forecasting. Thus, ANN models can be considered as a promising approach in forecasting monthly blood demand. {\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090415}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028449243\&doi=10.1145\%2f3090354.3090415\&partnerID=40\&md5=7ab92ea6ba7c2f5837ee88fcb2a91ffc}, author = {Khaldi, R. and El Afia, A. and Chiheb, R. and Faizi, R.} } @article {Ayache2017166, title = {Aspect-oriented state machines for resolving conflicts in XACML policies}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {10299 LNCS}, year = {2017}, note = {cited By 0}, pages = {166-171}, abstract = {Authorization in collaborative systems is defined by a global policy that represents the combination of the collaborators{\textquoteright} access policies. However, the enforcement of such a global policy may create conflicting authorization decisions. In this paper, we categorize two types of conflicts that may occur in such policies.
Furthermore, to resolve these conflicts and to reach a unique decision for an access request, we present an approach that uses XACML policy combining algorithms and considers the category of the detected conflicts. The approach is implemented using aspect-oriented finite state machines. {\textcopyright} Springer International Publishing AG 2017.}, doi = {10.1007/978-3-319-59647-1_13}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019731058\&doi=10.1007\%2f978-3-319-59647-1_13\&partnerID=40\&md5=917cf51c38e853e2423ad411ec1ecadb}, author = {Ayache, M. and Erradi, M. and Freisleben, B. and Khoumsi, A.} } @conference {Illi2017855, title = {Asymptotic analysis of underwater communication system subject to κ-μ Shadowed fading channel}, booktitle = {2017 13th International Wireless Communications and Mobile Computing Conference, IWCMC 2017}, year = {2017}, note = {cited By 0}, pages = {855-860}, abstract = {In this paper, a unified performance analysis of underwater wireless communication system is presented. The radio link between the source (S) and the destination (D) is subject to κ-μ shadowed fading channel. We present an analytical closed-form expression for the cumulative distribution function (CDF) and the probability density function (PDF) of the total end-to-end SNR in terms of the Kummer{\textquoteright}s Hypergeometric function. Based on these results, we present exact closed-form expressions of communication system performance criteria, such as outage probability (OP) and average bit/symbol error rate (ABER/ASER) with their respective asymptotic expansion, at both low and high SNR regimes, expressed in terms of basic elementary functions. All the derived analytical expressions are validated through computer-based simulation. {\textcopyright} 2017 IEEE.}, doi = {10.1109/IWCMC.2017.7986397}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85027884347\&doi=10.1109\%2fIWCMC.2017.7986397\&partnerID=40\&md5=c5c5b9d26646e4c5a9f570c929975baf}, author = {Illi, E. and El Bouanani, F. and Ayoub, F.} } @conference {Elmaroud2017, title = {BER analysis of FBMC based multi-cellular networks in the presence of synchronization errors and HPA NLD}, booktitle = {IEEE Vehicular Technology Conference}, year = {2017}, note = {cited By 1}, abstract = {In this paper, we study the performance of asynchronous and non linear FBMC based multi-cellular networks. The considered system consists of a reference mobile user perfectly synchronized with its reference base station (BS) and K interfering BSS. We have considered both synchronization errors and high power amplifiers (HPA) non linear distortions (NLD). We analyzed first the signal to noise plus interference ratio (SINR). On the basis of this analysis, derived an accurate of bit error rate (BER) in the presence of a frequency selective channel. In order to reduce the computational complexity of the BER expression, we applied an interesting lemma based on the moment generating function of the interference power. Finally, the proposed model is evaluated through computer simulations which show a high sensitivity of the asynchronous FBMC based multi-cellular network to HPA non linear distortions. {\textcopyright} 2016 IEEE.}, doi = {10.1109/VTCFall.2016.7880922}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85016936548\&doi=10.1109\%2fVTCFall.2016.7880922\&partnerID=40\&md5=170d934dcec5039d0265f9b0310630c6}, author = {Elmaroud, B. and Faqihi, M.A. and Abbad, M. 
and Aboutajdine, D.} } @conference {Fath-ALLAH2017326, title = {A Best Practice Based E-Government Portals{\textquoteright} Maturity Model-A Case Study}, booktitle = {IEEE CIT 2017 - 17th IEEE International Conference on Computer and Information Technology}, year = {2017}, note = {cited By 0}, pages = {326-331}, abstract = {An e-government portal{\textquoteright}s maturity model is a set of incremental stages that defines the maturity of e-government portals. In fact, these models can be used by e-government agencies to identify the maturity rank of their portal and at the same time provide recommendations and guidelines for agencies to improve their portals{\textquoteright} maturity. In previous research studies, we have built an e-government portals{\textquoteright} best practice model and a maturity model that is based on the former model. Moreover, we have evaluated the model by a set of experts in the e-government field. The model has proved its validity and reliability within the set of the participants. The aim of this paper is to perform a case study on this maturity model on a real portal. Our findings show that the portal being measured is in the presence stage of maturity. {\textcopyright} 2017 IEEE.}, doi = {10.1109/CIT.2017.23}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032359695\&doi=10.1109\%2fCIT.2017.23\&partnerID=40\&md5=765abc46de7761960104adbb2b22b013}, author = {Fath-Allah, A. and Cheikhi, L. and Idri, A. and Al-Qutaish, R.} } @article {Aznabet2017137, title = {A broadband modified T-shaped planar dipole antenna for UHF RFID tag applications}, journal = {Progress In Electromagnetics Research C}, volume = {73}, year = {2017}, note = {cited By 0}, pages = {137-144}, abstract = {In this paper, we report a single layer modified T-shaped dipole antenna for UHF-RFID tag applications. The designed RFID tag antenna consists of a pair of T-shaped dipole strips loaded with four half discs patches and a tag chip placed in the center. The antenna{\textquoteright}s size is 80 {\texttimes} 40 {\texttimes} 1.6 mm3. Performance of the proposed design was investigated with simulations and measurements. The main feature of this design is that the RFID tag antenna can operate effectively at 868 MHz and 915 MHz frequency bands which make it broadband. The maximum reading range measured in an anechoic chamber is 4.25 m and 5.27 m at 915 MHz and 867.5 MHz, respectively. Furthermore, the RFID tag antenna can work on metallic plates when inserting a foam spacer between them. The final result has a simple configuration, low profile and can be suitable for practical applications dealing with free-space and metallic objects. {\textcopyright} 2017, Electromagnetics Academy. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018499025\&partnerID=40\&md5=fc169608c0f9d2ca9edc6935cc6a53e4}, author = {Aznabet, I. and Ennasar, M.A. and El Mrabet, O. and Andia Vera, G. and Khalladi, M. and Tedjni, S.} } @conference {Abouaomar2017, title = {Caching, device-to-device and fog computing in 5th cellular networks generation: Survey}, booktitle = {Proceedings - 2017 International Conference on Wireless Networks and Mobile Communications, WINCOM 2017}, year = {2017}, note = {cited By 0}, abstract = {Many researches and standardization work on the challenges that 5th networks generation raised from the radio perspective while employing advanced techniques such as massive MIMO (Multiple-Input-Multiple-Output) and CoMP (Cooperative Multi-points Processes). 
However the backhaul problems such as bottlenecks has emerged due to the deployment of ultradense and heavy traffic that should be connected to the core networks. In this paper we investigate the caching as a promising solution to deal with the backhaul problems and the offload of the network. By caching the content near the users, at the base stations or at the device side via device-to-device communications or in advanced architecture of the cloud (In the Fog) is a promising solution to bring the interesting content closer to the users. Caching techniques are many, in this paper we grouped the most interesting ones with regard to different architectures, considering the cases and the quality of the solutions. {\textcopyright} 2017 IEEE.}, doi = {10.1109/WINCOM.2017.8238174}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041450036\&doi=10.1109\%2fWINCOM.2017.8238174\&partnerID=40\&md5=08c633efad0f6759f14c9373be0f0b07}, author = {Abouaomar, A. and Filali, A. and Kobbane, A.} } @conference {ElHadj2017548, title = {Clustering-based approach for anomaly detection in XACML policies}, booktitle = {ICETE 2017 - Proceedings of the 14th International Joint Conference on e-Business and Telecommunications}, volume = {4}, year = {2017}, note = {cited By 0}, pages = {548-553}, abstract = {The development of distributed applications arises multiple security issues such as access control. Attribute-Based Access Control has been proposed as a generic access control model, which provides more flexibility and promotes information and security sharing. eXtensible Access Control Markup Language (XACML) is the most convenient way to express ABAC policies. However, in distributed environments, XACML policies become more complex and hard to manage. In fact, an XACML policy in distributed applications may be aggregated from multiple parties and can be managed by more than one administrator. Therefore, it may contain several anomalies such as conflicts and redundancies, which may affect the performance of the policy execution. In this paper, we propose an anomaly detection method based on the decomposition of a policy into clusters before searching anomalies within each cluster. Our evaluation results demonstrate the efficiency of the suggested approach. Copyright {\textcopyright} 2017 by SCITEPRESS - Science and Technology Publications, Lda. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029414026\&partnerID=40\&md5=133521fca6a95f8238cfc3b777dd3534}, author = {El Hadj, M.A. and Ayache, M. and Benkaouz, Y. and Khoumsi, A. and Erradi, M.} } @conference {Zerrouk2017, title = {Collaborative modelling: An MDE-oriented process to manage large-scale models}, booktitle = {2017 International Conference on Wireless Technologies, Embedded and Intelligent Systems, WITS 2017}, year = {2017}, note = {cited By 0}, abstract = {The application of Model Driven Engineering in an industrial context implies working with large models, hence the need for collaborative modeling. Each developer focuses on a part of the large model and may manipulate it independently. As each part evolves rapidly and concurrently due to changes made by different designers, inconsistencies may occur. This work aims to provide support for dealing with concurrent changes. We propose a metamodel for the description of large models, and we also propose a process for the management of concurrent changes. 
{\textcopyright} 2017 IEEE.}, doi = {10.1109/WITS.2017.7934626}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85021631540\&doi=10.1109\%2fWITS.2017.7934626\&partnerID=40\&md5=21c3bf573defa27ec9db8cab24e64611}, author = {Zerrouk, M. and Anwar, A. and Benelallam, I. and Elhamlaoui, M.} } @article {Achchab2017226, title = {A combination of regression techniques and cuckoo search algorithm for FOREX speculation}, journal = {Advances in Intelligent Systems and Computing}, volume = {569}, year = {2017}, note = {cited By 0}, pages = {226-235}, abstract = {This paper describes a hybrid model formed by a mixture of regression techniques and Cuckoo Search algorithm to speculate USD/EUR variations. Inspired by ARMA model we propose a dataset composed of historical data of USD/EUR and (JYN, EUR and BRP) variations. The dataset is used to train four regression algorithms: Multiple linear regression, Support vector regression, Partial Least Squares regression and CRT regression tree; the generated regression weights of these algorithms will be used as inputs to Cuckoo Search algorithm. The effectiveness of the proposed system against classical regression algorithms is confirmed by experiments on exchange rate prediction within the period from January 2014 to January 2016. {\textcopyright} Springer International Publishing AG 2017.}, doi = {10.1007/978-3-319-56535-4_23}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018486816\&doi=10.1007\%2f978-3-319-56535-4_23\&partnerID=40\&md5=8ec7dbda07a48707f78a6b186721c9bf}, author = {Achchab, S. and Bencharef, O. and Ouaarab, A.} } @conference {Verma2017155, title = {Comprehensive method for detecting phishing emails using correlation-based analysis and user participation}, booktitle = {CODASPY 2017 - Proceedings of the 7th ACM Conference on Data and Application Security and Privacy}, year = {2017}, note = {cited By 0}, pages = {155-157}, abstract = {Phishing email has become a popular solution among attackers to steal all kinds of data from people and easily breach organizations{\textquoteright} security system. Hackers use multiple techniques and tricks to raise the chances of success of their attacks, like using information found on social networking websites to tailor their emails to the target{\textquoteright}s interests, or targeting employees of an organization who probably can{\textquoteright}t spot a phishing email or malicious websites and avoid sending emails to IT people or employees from Security department. In this paper we focus on analyzing the coherence of information contained in the different parts of the email: Header, Body, and URLs. After analyzing multiple phishing emails we discovered that there is always incoherence between these different parts. We created a comprehensive method which uses a set of rules that correlates the information collected from analyzing the header, body and URLs of the email and can even include the user in the detection process. We take into account that there is no such thing called perfection, so even if an email is classified as legitimate, our system will still send a warning to the user if the email is suspicious enough. This way even if a phishing email manages to escape our system, the user can still be protected. {\textcopyright} 2017 ACM.}, doi = {10.1145/3029806.3029842}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018511154\&doi=10.1145\%2f3029806.3029842\&partnerID=40\&md5=abca7a2730527a04bc2711d3b798aaab}, author = {Verma, R.
and Aassal, A.E.} } @article {Slimani2017144, title = {Configuration and implementation of a daily artificial neural network-based forecasting system using real supermarket data}, journal = {International Journal of Logistics Systems and Management}, volume = {28}, number = {2}, year = {2017}, note = {cited By 0}, pages = {144-163}, abstract = {The purpose of any effective supply chain is to find balance between supply and demand by coordinating all internal and external processes in order to ensure delivery of the right product, to the right customer, at the best time and with the optimal cost. Therefore, the estimation of future demand is one of the crucial tasks for any organisation of the supply chain system who has to make the correct decision in the appropriate time to enhance its commercial competitiveness. In an earlier study, where various artificial neural networks{\textquoteright} structures are compared including perceptron, adaline, no-propagation, multi layer perceptron (MLP) and radial basis function for demand forecasting, the results indicate that the MLP structure present the best forecasts with the optimal error. Consequently, this paper focuses on realising a daily demand predicting system in a supermarket using MLP by adding inputs including previous demand, days{\textquoteright} classification and average demand quantities. {\textcopyright} Copyright 2017 Inderscience Enterprises Ltd.}, doi = {10.1504/IJLSM.2017.086345}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029227042\&doi=10.1504\%2fIJLSM.2017.086345\&partnerID=40\&md5=ccd93c50554b8b201bac46a1363f9953}, author = {Slimani, I. and El Farissi, I. and Achchab, S.} } @article {Khelifa2017937, title = {Constructing accurate and robust HMM/GMM models for an Arabic speech recognition system}, journal = {International Journal of Speech Technology}, volume = {20}, number = {4}, year = {2017}, note = {cited By 0}, pages = {937-949}, abstract = {Conventional Hidden Markov Model (HMM) based Automatic Speech Recognition (ASR) systems generally utilize cepstral features as acoustic observation and phonemes as basic linguistic units. Some of the most powerful features currently used in ASR systems are Mel-Frequency Cepstral Coefficients (MFCCs). Speech recognition is inherently complicated due to the variability in the speech signal which includes within- and across-speaker variability. This leads to several kinds of mismatch between acoustic features and acoustic models and hence degrades the system performance. The sensitivity of MFCCs to speech signal variability motivates many researchers to investigate the use of a new set of speech feature parameters in order to make the acoustic models more robust to this variability and thus improve the system performance. The combination of diverse acoustic feature sets has great potential to enhance the performance of ASR systems. This paper is a part of ongoing research efforts aspiring to build an accurate Arabic ASR system for teaching and learning purposes. It addresses the integration of complementary features into standard HMMs for the purpose to make them more robust and thus improve their recognition accuracies. The complementary features which have been investigated in this work are voiced formants and Pitch in combination with conventional MFCC features. A series of experimentations under various combination strategies were performed to determine which of these integrated features can significantly improve systems performance. 
The Cambridge HTK tools were used as a development environment of the system and experimental results showed that the error rate was successfully decreased, the achieved results seem very promising, even without using language models. {\textcopyright} 2017, Springer Science+Business Media, LLC.}, doi = {10.1007/s10772-017-9456-7}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029588089\&doi=10.1007\%2fs10772-017-9456-7\&partnerID=40\&md5=2479bd9e84585d976f41dd5488eb12c2}, author = {Khelifa, M.O.M. and Elhadj, Y.M. and Abdellah, Y. and Belkasmi, M.} } @conference {ElFarissi2017122, title = {Coordination by sharing demand forecasts in a supply chain using game theoretic approach}, booktitle = {Colloquium in Information Science and Technology, CIST}, year = {2017}, note = {cited By 0}, pages = {122-127}, abstract = {Through the literature, authors give a considerable attention to game theory because of its wide range of applications in various fields including economics, political science, psychology or biology. The aim of this case study is to employ game theoretic approach to model information sharing as a coordination mechanism in a basic two-echelon supply chain composed of a single retailer and a single supplier. This paper is the sequel of previous works; where demand is forecasted based on historical data of a supermarket in Morocco using the Multi Layer Perceptron structure of the artificial neural networks. Nevertheless, this work focuses on the implementation of the obtained forecasting results in the studied system modeled as a game of two players with asymmetric and imperfect information, in order to find the equilibrium of the game that guaranties maximum payoff for both players. In fact, this is what game theory is all about. {\textcopyright} 2016 IEEE.}, doi = {10.1109/CIST.2016.7805028}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010210515\&doi=10.1109\%2fCIST.2016.7805028\&partnerID=40\&md5=79f1785352cd1586a09b76b45cdd93d1}, author = {El Farissi, I. and Slimani, I. and Achchab, S.} }
@conference {Sayagh2017255, title = {On cross-stack configuration errors}, booktitle = {Proceedings - 2017 IEEE/ACM 39th International Conference on Software Engineering, ICSE 2017}, year = {2017}, note = {cited By 0}, pages = {255-265}, abstract = {Today{\textquoteright}s web applications are deployed on powerful software stacks such as MEAN (JavaScript) or LAMP (PHP), which consist of multiple layers such as an operating system, web server, database, execution engine and application framework, each of which provide resources to the layer just above it. These powerful software stacks unfortunately are plagued by so-called cross-stack configuration errors (CsCEs), where a higher layer in the stack suddenly starts to behave incorrectly or even crash due to incorrect configuration choices in lower layers. Due to differences in programming languages and lack of explicit links between configuration options of different layers, sysadmins and developers have a hard time identifying the cause of a CsCE, which is why this paper (1) performs a qualitative analysis of 1,082 configuration errors to understand the impact, effort and complexity of dealing with CsCEs, then (2) proposes a modular approach that plugs existing source code analysis (slicing) techniques, in order to recommend the culprit configuration option. Empirical evaluation of this approach on 36 real CsCEs of the top 3 LAMP stack layers shows that our approach reports the misconfigured option with an average rank of 2.18 for 32 of the CsCEs, and takes only few minutes, making it practically useful. {\textcopyright} 2017 IEEE.}, doi = {10.1109/ICSE.2017.31}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85027717403\&doi=10.1109\%2fICSE.2017.31\&partnerID=40\&md5=fa56524dafc8d93876ecd8c56f396da3}, author = {Sayagh, M. and Kerzazi, N. and Adams, B.} } @conference {ElAfia2017, title = {Data-driven based aircraft maintenance routing by markov decision process model}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {Aircraft maintenance routing is of basic significance to the safe and efficient operations of an airline. However, the timely efficiency of the airline flight schedule is susceptible to various factors during the daily operations. Air traffic often undergoes some random disruptions that expose maintenance routing to random flight delays, which have to be considered to ensure safe and operational flight schedule. The idea of data-driven methods was the focal point of much studies during a previous couple of years. Constrained Markov Decision process model was selected in this paper to remedy this problem and design the maintenance needs of an aircraft taking past data information into account. Maintenance actions are so modeled with stochastic state transitions. This can offer the opportunity to solve the maintenance routing problem deliberating and handling flight disturbances. Through computational tests on real data of a Moroccan airline company, we investigate the efficiency of this solution approach on history data sets.
{\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090430}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028455379\&doi=10.1145\%2f3090354.3090430\&partnerID=40\&md5=b414d040a3e32989b0121144dbd1f7f3}, author = {El Afia, A. and Aoun, O.} } @article {Laghouaouta2017142, title = {A dedicated approach for model composition traceability}, journal = {Information and Software Technology}, volume = {91}, year = {2017}, note = {cited By 0}, pages = {142-159}, abstract = {Context: Software systems are often too complex to be expressed by a single model. Recognizing this, the Model Driven Engineering (MDE) proposes multi-modeling approaches to allow developers to describe a system from different perspectives. In this context, model composition has become important since the combination of those partial representations is inevitable. Nevertheless, no approach has been defined for keeping track of the composition effects, and this operation has been overshadowed by model transformations. Objective This paper presents a traceability approach dedicated to the composition of models. Two aspects of quality are considered: producing relevant traces; and dealing with scalability. Method The composition of softgoal trees has been selected to motivate the need for tracing the composition of models and to illustrate our approach. The base principle is to augment the specification of the composition with the behavior needed to generate the expected composed model accompanied with a trace model. This latter includes traces of the execution details. For that, traceability is considered as a crosscutting concern and encapsulated in an aspect. As part of the proposal, an Eclipse plug-in has been implemented as a tool support. Besides, a comparative experiment has been conducted to assess the traces relevance. We also used the regression method to validate the scalability of the tool support. Results Our experiments show that the proposed approach allows generating relevant traces. In addition, the obtained results reveal that tracing a growing number of elements causes an acceptable increase of response time. Conclusion This paper presents a traceability approach dedicated to the composition of models and its application to softgoal trees. The experiment results reveal that our proposal considers the composition specificities for producing valuable traceability information while supporting scalability. {\textcopyright} 2017 Elsevier B.V.}, doi = {10.1016/j.infsof.2017.07.002}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85021928111\&doi=10.1016\%2fj.infsof.2017.07.002\&partnerID=40\&md5=efd71b14c98f0091be2f0f61bbb50a3f}, author = {Laghouaouta, Y. and Anwar, A. and Nassar, M. and Coulette, B.} } @conference {Bouzbita2017344, title = {Dynamic adaptation of the ACS-TSP local pheromone decay parameter based on the Hidden Markov Model}, booktitle = {Proceedings of 2016 International Conference on Cloud Computing Technologies and Applications, CloudTech 2016}, year = {2017}, note = {cited By 1}, pages = {344-349}, abstract = {The objective of the present paper is to propose an improved Ant Colony System (ACS) algorithm based on a Hidden Markov Model (HMM) so as dynamically adapt the local pheromone decay parameter ξ. The proposed algorithm uses Iteration and Diversity as indicators of the hidden states in the search space in ACS. To test the efficiency of our algorithm, we experimented it on several benchmark Travelling Salesman Problem (TSP) instances. 
The results have proven the effectiveness of our algorithm in both the convergence speed and the solution quality. {\textcopyright} 2016 IEEE.}, doi = {10.1109/CloudTech.2016.7847719}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013861130\&doi=10.1109\%2fCloudTech.2016.7847719\&partnerID=40\&md5=26cafb2d23ee70bf2f3552d7fec22e8a}, author = {Bouzbita, S. and El Afia, A. and Faizi, R. and Zbakh, M.} } @conference {Assila2017, title = {A dynamic Stackelberg-Cournot game for competitive content caching in 5G networks}, booktitle = {Proceedings - 2017 International Conference on Wireless Networks and Mobile Communications, WINCOM 2017}, year = {2017}, note = {cited By 0}, abstract = {The main concept behind 5G mobile network is to expand the idea of small cell network (SCN) to create a cooperative network able to cache data in active nodes inside radio access and Core network. Caching technique is a workaround to deal with bottleneck in the Back-haul, as the capacity of the wireless links could not support the increasing demand for rich multimedia. In this perspective multiple contents providers are in competition for caching space of network operator base stations. In fact, the caching space is a limited resource due to the exponential traffic of mobile data and video consumption. It is in this perspective that mobile operators and contents providers find themselves linked in this market profit generating, and consequently linked also in the allocating cache and setting price issues. In this paper we propose a multi-Stackelberg game between multiple MNOs (leaders) and several CPs (followers) computing under the Cournot-Nash assumption. In the first step a multi-leader Stackelberg game between Multiple MNO, considered as the leaders, aims to define the price they charges the CPs to maximize their profit. In the second step a multi-follower Cournot game between the CPs, considered as the followers, compete to increase the space quantity they cache at the MNOs small base stations (SBS) to maximize also their profit and to improve the quality of service (QoS) of their users. Our goal is to find the price the MNOs will set and the quantity of contents that each CP will cache. In the pricing game, each MNO first sets the price. Then the CPs react with proposed quantities of Space to cache. Then after the MNO sets again an optimal price according to the prediction of each CP0s optimal strategies. Numerical results describe the structure of the Nash equilibrium and the optimal prices resulting from the MNOs and CPs optimal strategies. {\textcopyright} 2017 IEEE.}, doi = {10.1109/WINCOM.2017.8238184}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041439197\&doi=10.1109\%2fWINCOM.2017.8238184\&partnerID=40\&md5=36792f2ffea42a36227d37ccdf8af78b}, author = {Assila, B. and Kobbaney, A. and Elmachkourz, M. and El Koutbi, M.} } @conference {Chaimae2017200, title = {ECC certificate for authentication in cloud-based RFID}, booktitle = {Proceedings of 2016 International Conference on Cloud Computing Technologies and Applications, CloudTech 2016}, year = {2017}, note = {cited By 0}, pages = {200-203}, abstract = {Cloud computing is the access via internet to shared resources and services. Merging this technology with RFID enabled the improvement of the latter such as data storage and processing, and both allow to have intelligent environments and access services anytime and anywhere. But this fusion is the subject of some criticism concerning the data security and privacy. 
In this article we are interested in the authentication of tag/reader to the cloud interface and we propose a protocol using elliptic curve cryptography (ECC) to verify the identity of readers before issuing the certificate which authenticates the reader to tag, and then lets them access the cloud interface. {\textcopyright} 2016 IEEE.}, doi = {10.1109/CloudTech.2016.7847699}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013847389\&doi=10.1109\%2fCloudTech.2016.7847699\&partnerID=40\&md5=98054382849319c9204046d0edee3677}, author = {Chaimae, E. and Romadi, R. and Abdellatif, E.A.} } @article {Sahnoun201722, title = {EEPR-OLSR: An energy efficient and path reliability protocol for proactive mobile Ad-hoc network routing}, journal = {International Journal of Communication Networks and Information Security}, volume = {9}, number = {1}, year = {2017}, note = {cited By 2}, pages = {22-29}, abstract = {Routing in Mobile Ad-hoc Networks has received a lot of attention due to the challenges posed by the self-organizing nature of the network, the dynamic topology, and the unreliable wireless medium. One of the most critical issues for MANETs is how to increase network lifetime, since nodes are typically battery powered. In this paper we consider the proactive MANET protocol OLSR to improve the network lifetime; we propose a novel multiple metric routing scheme for MANET, based on energy efficient and path reliability metrics, integrating it to standard OLSR, named Energy Efficient and Path Reliability OLSR (EEPR-OLSR), in which we investigate cross layer parameters that affect the network lifetime and a prediction-based link availability estimation is introduced. Simulation results, by NS3 simulator, show that the proposed EEPR-OLSR provides significant performance gains in both the network lifetime and packet delivery ratio (PDR), compared to the standard OLSR.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85017133726\&partnerID=40\&md5=b9c4bae42e6e2af86bcd792df9345d07}, author = {Sahnoun, A. and Habbani, A. and El Abbadi, J.} } @article {ElAfia20172161, title = {The effect of updating the local pheromone on ACS performance using fuzzy logic}, journal = {International Journal of Electrical and Computer Engineering}, volume = {7}, number = {4}, year = {2017}, note = {cited By 1}, pages = {2161-2168}, abstract = {Fuzzy Logic Controller (FLC) has become one of the most frequently utilised algorithms to adapt the metaheuristics parameters as an artificial intelligence technique. In this paper, the Ξ parameter of Ant Colony System (ACS) algorithm is adapted by the use of FLC, and its behaviour is studied during this adaptation. The proposed approach is compared with the standard ACS algorithm. Computational results are done based on a library of sample instances for the Traveling Salesman Problem (TSPLIB). Copyright {\textcopyright} 2017 Institute of Advanced Engineering and Science. All rights reserved.}, doi = {10.11591/ijece.v7i4.pp2161-2168}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030845141\&doi=10.11591\%2fijece.v7i4.pp2161-2168\&partnerID=40\&md5=b3dc47e58e78c530f0c597ab93eda8bf}, author = {El Afia, A. and Bouzbita, S.
and Faizi, R.} } @conference {Boualame2017, title = {An efficient soft decision decoding algorithm using cyclic permutations and compact genetic algorithm}, booktitle = {2016 International Conference on Advanced Communication Systems and Information Security, ACOSIS 2016 - Proceedings}, year = {2017}, note = {cited By 0}, abstract = {The compact genetic algorithm cGA is used in this paper to design an efficient soft-decision decoding algorithm, especially for the cyclic codes, because the cGA dramatically reduces the population{\textquoteright}s size and rapidly converges to the optimal solution compared to classical genetic algorithms. Our main contribution is to exploit the cyclic property of cyclic linear codes to reduce the complexity of the decoding process especially in the test sequences generation and re-encoding stage where we use the generator polynomial instead of the generator matrix. The second idea behind our decoding algorithm is the complexity improvement inside of cGA by decreasing the probability vector{\textquoteright}s length, which becomes less than the length of the cGA original one. The experiments were carried out on the most popular cyclic codes, and the results show that the performances of our algorithm are better than some famous decoding algorithms in terms of Bit Error Rate. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ACOSIS.2016.7843936}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85015170559\&doi=10.1109\%2fACOSIS.2016.7843936\&partnerID=40\&md5=249f966f42367ada53bac8892853f1c4}, author = {Boualame, H. and Tahiri, N. and Chana, I. and Azouaoui, A. and Belkasmi, M.} } @conference {Abnane20171302, title = {Empirical evaluation of fuzzy analogy for Software Development Effort Estimation}, booktitle = {Proceedings of the ACM Symposium on Applied Computing}, volume = {Part F128005}, year = {2017}, note = {cited By 0}, pages = {1302-1304}, abstract = {Software Development Effort Estimation (SDEE) plays a primary role in software project management. Among several techniques suggested for estimating software development effort, analogybased software effort estimation approaches stand out as promising techniques. In this paper, the performance of Fuzzy Analogy is compared with that of six other SDEE techniques (Linear Regression, Support Vector Regression, Multi-Layer Perceptron, M5P and Classical Analogy). The first step of the evaluation aimed to ensure that the SDEE techniques outperformed random guessing by using the Standardized Accuracy (SA). Then, we used a set of reliable performance measures and Borda count to rank them and identify which techniques are the most accurate. The results suggest that Fuzzy Analogy statistically outperformed the other SDEE techniques regardless of the dataset used. {\textcopyright} 2017 ACM.}, doi = {10.1145/3019612.3019905}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020898807\&doi=10.1145\%2f3019612.3019905\&partnerID=40\&md5=df809b8bee59594505af4961af714d5e}, author = {Abnane, I. and Idri, A. and Abran, A.} } @article {Sahnoun2017399, title = {An energy-efficient proactive routing scheme for MANET: Game theoretical approach of forwarding with selfish nodes}, journal = {International Journal of Electronics and Telecommunications}, volume = {63}, number = {4}, year = {2017}, note = {cited By 0}, pages = {399-404}, abstract = {In Mobile Ad-hoc Networks, nodes exchange packets with each other using intermediate nodes as relays. 
Since nodes in MANETs are battery powered, energy conservation is a crucial issue. Accepting relay all request may not be in the best interest of a node. But if many nodes prefer not to consume energy in relaying packets on behalf of others, the overall performance of routing in network will be influenced. In this paper we address the energy-efficient routing problem in MANETs with selfish nodes. We modeled this problem as a game-theoretic constraint optimization; we defined the utility of each node as a weighted difference between a performance metric and some transmission costs. A motivate mechanism is proposed in order to induce nodes to forwarding cooperation. Each node independently implements the optimal equilibrium strategy under the given constraints. Simulation results by NS3 simulator show that our proposed approach can improve system performance in network lifetime and packet delivery ratio. {\textcopyright} 2017 De Gruyter Open Ltd. All rights reserved.}, doi = {10.1515/eletel-2017-0055}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85042877707\&doi=10.1515\%2feletel-2017-0055\&partnerID=40\&md5=62e06e5bfb6b7ad51299584d89bc9a68}, author = {Sahnoun, A. and Habbani, A. and El Abbadi, J.} } @conference {Abnane2017, title = {Evaluating Fuzzy Analogy on incomplete software projects data}, booktitle = {2016 IEEE Symposium Series on Computational Intelligence, SSCI 2016}, year = {2017}, note = {cited By 0}, abstract = {Missing Data (MD) is a widespread problem that can affect the ability to use data to construct effective software development effort prediction systems. This paper investigates the use of missing data (MD) techniques with Fuzzy Analogy. More specifically, this study analyze the predictive performance of this analogy-based technique when using toleration, deletion or k-nearest neighbors (KNN) imputation techniques using the Pred(0.25) accuracy criterion and thereafter compares the results with the findings when using the Standardized Accuracy (SA) measure. A total of 756 experiments were conducted involving seven data sets, three MD techniques (toleration, deletion and KNN imputation), three missingness mechanisms (MCAR: missing completely at random, MAR: missing at random, NIM: non-ignorable missing), and MD percentages from 10 percent to 90 percent. The results of accuracy measured in terms of Pred(0.25) confirm the findings of a study which used the SA measure. Moreover, we found that SA and Pred(0.25) measure different aspects of technique performance. Hence, SA is not sufficient to conclude about the technique accuracy and it should be used with other metrics, especially Pred(0.25). {\textcopyright} 2016 IEEE.}, doi = {10.1109/SSCI.2016.7849922}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85016044171\&doi=10.1109\%2fSSCI.2016.7849922\&partnerID=40\&md5=9ece9d331d7bf0bbef07ed39c97ca2d1}, author = {Abnane, I. and Idri, A.} } @article {Chentouf2017, title = {Evaluating the impact of max transition constraint variations on power reduction capabilities in cell-based designs}, journal = {Journal of Low Power Electronics and Applications}, volume = {7}, number = {4}, year = {2017}, note = {cited By 0}, abstract = {Power optimization is a very important and challenging step in the physical design flow, and it is a critical success factor of an application-specific integrated circuit (ASIC) chip. Many techniques are used by the place and route (P\&R) electronic design automation (EDA) tools to meet the power requirement. 
In this paper, we will evaluate, independently from the library file, the impact of redefining the max transition constraint (MTC) before the power optimization phase, and we will study the impact of over-constraining or under-constraining a design on power in order to find the best trade-off between design constraining and power optimization. Experimental results showed that power optimization depends on the applied MTC and that the MTC value corresponding to the best power reduction results is different from the default MTC. By using a new MTC definition method on several designs, we found that the power gain between the default methodology and the new one reaches 2.34\%. {\textcopyright} 2017 by the authors. Licensee MDPI, Basel, Switzerland.}, doi = {10.3390/jlpea7040025}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85031285166\&doi=10.3390\%2fjlpea7040025\&partnerID=40\&md5=27c21efca64d37511e1a35fb74200dcf}, author = {Chentouf, M. and El Abidine, A.I.Z.} } @conference {Faizi2017997, title = {Extracting business value from big data}, booktitle = {Proceedings of the 29th International Business Information Management Association Conference - Education Excellence and Innovation Management through Vision 2020: From Regional Development Sustainability to Global Economic Growth}, year = {2017}, note = {cited By 0}, pages = {997-1002}, abstract = {The exponential growth of structured and unstructured data in the last few years has resulted in the emergence of {\textquoteright}big data{\textquoteright} as one of the hottest technology trends. With data of all kinds being generated in record amounts every minute, big data has become a buzzword across various fields. The objective of this paper is, therefore, to explore the various ways through which businesses can derive value from both internal and external data. In this respect, it was found out that companies, both big and small - across all industries - can use big data to increase customers{\textquoteright} retention and to meet the need of every individual customer. This huge amount of data can also help a business identify the strengths and weaknesses of its competitors and, thus, stay ahead of them. Finally, big data can provide businesses with real-time insights that would enable them to spot or predict problems, and solve them before they happen. Given all these benefits, we strongly believe that the collection and processing of the massive amounts of data, generated by customers or smart devices, can help any enterprise make more powerful and accurate decisions and, thus, significantly improve business operations and organizational performance.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029742611\&partnerID=40\&md5=bfa9daca776ba7dea239abddfc70501a}, author = {Faizi, R. and El Fkihi, S. and El Afia, A. and Chiheb, R.} } @conference {Abouyahya201746, title = {Features extraction for facial expressions recognition}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {46-49}, abstract = {The recognition of an expression seems obvious and easy when classified by the human brain. However, it is clearly difficult for a computer to detect human face, extract all of the components characterizing the facial expression and then determine its classification from a single image. 
Moreover, based on videos, the process becomes even more complex because it must simultaneously take into account the available temporal and spatial information. Also, it should be noted that facial feature extraction is important for developing a robust face representation, because it aims to select the best features and reduce the dimensionality of the feature set by finding a new set that contains most of the facial information. For those reasons, this paper presents several feature extraction approaches for facial expression recognition as a state-of-the-art review. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905642}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019146665\&doi=10.1109\%2fICMCS.2016.7905642\&partnerID=40\&md5=9a7be9980a63d7021cf8136fdf05dfd5}, author = {Abouyahya, A. and El Fkihi, S. and Thami, R.O.H. and Aboutajdine, D.} } @article {Elmouhtadi2017714, title = {Fingerprint identification using hierarchical matching and topological structures}, journal = {Advances in Intelligent Systems and Computing}, volume = {533}, year = {2017}, note = {cited By 1}, pages = {714-722}, abstract = {Fingerprint identification is one of the most popular and efficient biometric techniques used for improving automatic personal identification. In this paper, we present a new indexing method based, in the first step, on the estimation of the singular point, considered an important feature of the fingerprint, using a directional field. In the second step, a hierarchical Delaunay triangulation was applied on the minutiae around the extracted singular point. The comparison of two fingerprints was calculated by introducing the barycenter notion so as to ensure the exact location of the similar triangles. We have performed extensive experiments and comparisons to demonstrate the effectiveness of the proposed approach using a challenging public database (i.e., FVC2000) which contains small-area and low-quality fingerprints. {\textcopyright} Springer International Publishing AG 2017.}, doi = {10.1007/978-3-319-48308-5_68}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84994504482\&doi=10.1007\%2f978-3-319-48308-5_68\&partnerID=40\&md5=8c568e5df1645e23782fc831e2182b69}, author = {Elmouhtadi, M. and El Fkihi, S. and Aboutajdine, D.} } @conference {Elmouhtadi2017434, title = {Fingerprints indexing algorithms based on multiple characteristics}, booktitle = {Colloquium in Information Science and Technology, CIST}, year = {2017}, note = {cited By 0}, pages = {434-437}, abstract = {Fingerprint identification in large databases is a leading concern since biometrics remains one of the most reliable and secure means of identification; hence the need for an accurate and relevant search and identification system. In this paper we present the different approaches of fingerprint indexing algorithms that have shown satisfactory results in the field, evaluated on large databases provided by FVC. {\textcopyright} 2016 IEEE.}, doi = {10.1109/CIST.2016.7805086}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010223948\&doi=10.1109\%2fCIST.2016.7805086\&partnerID=40\&md5=d8555e034c9089955faeed9880ac4dda}, author = {Elmouhtadi, M. and El Fkihi, S. and Aboutajdine, D.} } @article {ElAsri2017217, title = {From periphery to core: A temporal analysis of github contributors{\textquoteright} collaboration network}, journal = {IFIP Advances in Information and Communication Technology}, volume = {506}, year = {2017}, note = {cited By 0}, pages = {217-229}, abstract = {Open-source projects in GitHub exhibit rich temporal dynamics, and diverse contributors{\textquoteright} social interactions further intensify this process. In this paper, we analyze temporal patterns associated with Open Source Software (OSS) projects and how the contributor{\textquoteright}s notoriety grows and fades over time in a core-periphery structure. In order to explore the temporal dynamics of GitHub communities we formulate a time series clustering model using both Social Network Analysis (SNA) and technical metrics. By applying an adaptive time frame incremental approach to clustering, we locate contributors in different temporal networks. We demonstrate our approach on five long-lived OSS projects involving more than 700 contributors and find that there are three main temporal shapes of attention when contributors shift from periphery to core.
Our analyses provide insights into common temporal patterns of the growing OSS communities on GitHub and broaden the understanding of the dynamics and motivation of open source contributors. {\textcopyright} IFIP International Federation for Information Processing 2017.}, doi = {10.1007/978-3-319-65151-4_21}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029582330\&doi=10.1007\%2f978-3-319-65151-4_21\&partnerID=40\&md5=741b4dffe841d068e54b517f5b1966c8}, author = {El Asri, I. and Kerzazi, N. and Benhiba, L. and Janati, M.} } @conference {Idri2017114, title = {Fuzzy Analogy Based Effort Estimation: An Empirical Comparative Study}, booktitle = {IEEE CIT 2017 - 17th IEEE International Conference on Computer and Information Technology}, year = {2017}, note = {cited By 0}, pages = {114-121}, abstract = {Software Development Effort Estimation (SDEE) plays a primary role in software project management. Among several techniques suggested for estimating software development effort, analogy-based software effort estimation approaches stand out as promising techniques. In this paper, the performance of Fuzzy Analogy is compared with that of six other SDEE techniques (Linear Regression, Support Vector Regression, Multi-Layer Perceptron, M5P and Classical Analogy). The evaluation of the SDEE techniques was performed over seven datasets with two evaluation techniques (All-in and Jackknife). The first step of the evaluation aimed to ensure that the SDEE techniques outperformed random guessing by using the Standardized Accuracy (SA). Then, we used a set of reliable performance measures (Pred(0.25), MAE, MBRE, MIBRE and LSD) and Borda count to rank them and identify which techniques are the most accurate. The results suggest that when using All-in evaluation, Fuzzy Analogy statistically outperformed the other SDEE techniques regardless of the dataset used. However, when using Jackknife evaluation, the results obtained depended on the dataset and the SDEE technique used. The results suggest that Fuzzy Analogy is a promising technique for software development effort estimation. {\textcopyright} 2017 IEEE.}, doi = {10.1109/CIT.2017.29}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032383384\&doi=10.1109\%2fCIT.2017.29\&partnerID=40\&md5=2d4f527b6750abd19f4a3787348a493a}, author = {Idri, A. and Abnane, I.} } @conference {ElBassiti20171097, title = {Generic innovation designing -GenID- Framework: Towards a more systematic approach to innovation management}, booktitle = {Proceedings of the European Conference on Knowledge Management, ECKM}, volume = {2}, year = {2017}, note = {cited By 0}, pages = {1097-1106}, abstract = {To achieve sustainable success in the global market, modern organizations need to be flexible and fast in their reaction to change, which depends on their ability to innovate, not just occasionally but systematically. Despite countless efforts and spending, many organizations feel they are not making the most of their innovation potential and resources and do not generate the desired profit. This problem does not lie in a lack of ideas, but rather in the lack of a structured approach to innovation. Believing that the most pressing problems organizations face today are characterized by unprecedented levels of complexity and interdependence leads to the breakdown of the conventional problem-solving paradigm. However, a structured approach to innovation management could be criticized as it may lead to rigidity and hamper creativity.
Thus, there is a need to allow a trade-off between granting conditions for creativity, so new ideas can flourish, and at the same time keeping a systematic approach to smooth social cohesion, facilitate the pool of resources and promote the creation of a collaborative community. From the exploration of the distinguishing characteristics of the new economy and the new millenary, this paper provides the foundation of a systematic framework to innovation management. Based on the complexity and structuration theories, the concept "Innovation" has been formalized in two generic models: (1) Innovation Activities Model providing an integrated view of interactions involved within an innovation context, (2) Innovation Lifecycle Model portraying the major milestones over an innovation journey. These constructs has been assessed through an online survey designed to gather qualitative data. {\textcopyright} The Authors, 2017.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85035206916\&partnerID=40\&md5=2b32e9c5fe0b744d39d7f634493360b5}, author = {El Bassiti, L. and El Haiba, M. and Ajhoun, R.} } @conference {Afia2017, title = {A global mapping of the Moroccan supply chain of hospital drugs, and a simulation of the dispensation process}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {Pharmaceuticals are an important component of the health system activity. Their contribution to improving the health status is vital. To ensure sound management of the pharmaceutical products logistics (orders, delivery, storage and distribution), a new procurement strategy has been adopted by Morocco in 2012. This strategy is based on a centralized procurement, by which the purchase is grouped by anticipation (the budget for the year in progress / the exercise of the following year), and a decentralized storage and distribution allowing the territory stratification in eight regional depots: Berrechid, Oujda, Al-Hoceima, Laayoune, Agadir, Marrakech, Meknes and Tangier. The challenges this environment is facing justify the focus on this research subject, and the contribution of this work includes a global mapping of the Moroccan supply chain of hospital drugs, and a simulation of the dispensation process comparing the global and the nominative dispensation. {\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090465}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028455352\&doi=10.1145\%2f3090354.3090465\&partnerID=40\&md5=fbdb2614d215837a4276397094cfd697}, author = {Afia, A.E. and Mezouar, H.} } @conference {Hosni2017174, title = {Heterogeneous Ensembles for Software Development Effort Estimation}, booktitle = {Proceedings - 2016 3rd International Conference on Soft Computing and Machine Intelligence, ISCMI 2016}, year = {2017}, note = {cited By 0}, pages = {174-178}, abstract = {Software effort estimation influences almost all the process of software development such as: bidding, planning, and budgeting. Hence, delivering an accurate estimation in early stages of the software life cycle may be the key of success of any project. To this aim, many solo techniques have been proposed to predict the effort required to develop a software system. Nevertheless, none of them proved to be suitable in all circumstances. 
Recently, Ensemble Effort Estimation has been investigated to estimate software effort and consists on generating the software effort by combining more than one solo estimation technique by means of a combination rule. In this study, a heterogeneous EEE based on four machine learning techniques was investigated using three linear rules and two well-known datasets. The results of this study suggest that the proposed heterogeneous EEE yields a very promising performance and there is no best combiner rule that can be recommended. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ISCMI.2016.15}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85034656098\&doi=10.1109\%2fISCMI.2016.15\&partnerID=40\&md5=86bd61c4459af4a3491046df1925ab2c}, author = {Hosni, M. and Idri, A. and Nassif, A.B. and Abran, A.} } @article {Afia20179997, title = {Hidden markov model control of inertia weight adaptation for Particle swarm optimization}, journal = {IFAC-PapersOnLine}, volume = {50}, number = {1}, year = {2017}, note = {cited By 0}, pages = {9997-10002}, abstract = {Particle swarm optimization (PSO) is a stochastic algorithm based population that integrates social interactions of animals in nature. One of the main challenges within PSO is to balance between global and local search throughout the course of a run. To achieve this trade-off, various adaptive PSOs have been proposed in order to control the values of its parameters. The present paper makes an attempt to determine a generalized adaptive framework for the setting of the inertia weight parameter which is named HMM-wPSO. That is, a control mechanism of the inertia weight is proposed based on the estimation of states using hidden Markov model (HMM). We performed evaluations on ten benchmark functions to test the HMM control of inertia weight parameter for the PSO. Experimental results show that our proposed scheme outperforms other compared PSO variants in major cases in terms of solution accuracy and convergence speed. {\textcopyright} 2017}, doi = {10.1016/j.ifacol.2017.08.2030}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85031775272\&doi=10.1016\%2fj.ifacol.2017.08.2030\&partnerID=40\&md5=3e5c7fac9adf59cd9559143a77cf11ab}, author = {Afia, A.E. and Sarhani, M. and Aoun, O.} } @conference {Lalaoui2017558, title = {Hidden Markov Model for a self-learning of Simulated Annealing cooling law}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {558-563}, abstract = {The Simulated Annealing (SA) is a stochastic local search algorithm. It is an adaptation of the Metropolis-Hastings Monte Carlo algorithm. SA mimics the annealing process in metallurgy to approximate the global optimum of an optimization problem and uses a temperature parameter to control the search. The efficiency of the simulated annealing algorithm involves the adaptation of the cooling schedule. In this paper, we integrate Hidden Markov Model (HMM) in SA to iteratively predict the best cooling law according to the search history. Experiments performed on many benchmark functions show that our proposed scheme outperforms other SA variants in term of quality of solutions. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905557}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019141326\&doi=10.1109\%2fICMCS.2016.7905557\&partnerID=40\&md5=68063f3aef36190a9965517a3e628711}, author = {Lalaoui, M. and El Afia, A. 
and Chiheb, R.} } @conference {Haddad2017484, title = {A high gain Novel Dielectric Resonator Antenna (NDRA) for anti-collision short range radar (SRR) application}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {484-486}, abstract = {This paper presents the design of Novel Dielectric Resonator Antenna (NDRA) feed by a micro-strip line for anti-collision radar SRR application at 24 GHz. The proposed NDRA operates at a frequency of 24 GHz with a dielectric constant of 18. The simulated NDRA has a high gain (12.24 dB) and a high radiation efficiency (94 \%). The return loss, radiation pattern and gain of the proposed antenna are evaluated. The simulation process was carried out using Computer Simulation Technology (CST) Microwave Studio. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905582}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019154107\&doi=10.1109\%2fICMCS.2016.7905582\&partnerID=40\&md5=5e26a28411e91981e44e0fd02ad0dda4}, author = {Haddad, A. and Aoutoul, M. and Rais, K. and Essaaidi, M.} } @conference {YoussfiAlaoui2017, title = {Human fall detection using von mises distribution and motion vectors of interest points}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {In the field of public health care, fall detection is one of the major problem, especially for elderly persons. For that, an effective surveillance system is a necessity to reduce injuries caused by falls. Our article presents a new method to detect falls. In fact, we used optical flow to calculate motion vectors and statistical distribution named von Mises. {\textcopyright} 2017 Association for computing machinery.}, doi = {10.1145/3090354.3090438}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028462198\&doi=10.1145\%2f3090354.3090438\&partnerID=40\&md5=f0c8dbfd8b6a8894a4a085817ad5ebe6}, author = {Youssfi Alaoui, A. and El Hassouny, A. and Oulad Haj Thami, R. and Tairi, H.} } @article {Abbassi2017210, title = {A hybrid algorithm for vehicle routing problem with time windows and target time}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {95}, number = {1}, year = {2017}, note = {cited By 1}, pages = {210-219}, abstract = {The routing of a fleet of vehicles to service a set of customers is important in the field of goods distribution. Vehicle routing problem with time windows (VRPTW) is a well-known combinatorial problem. This article aims at studying the vehicle routing problem with time windows and target time (VRPTWTT). VRPTWTT involves the routing of a set of vehicles with limited capacity from a central depot to a set of geographically dispersed customers with known demands and predefined time windows as well as a target time. There are penalties associated with servicing either earlier or later than this target servicing time. The goal is to minimize the costs of transport and penalties of delay and ahead of time. Although VRPTWTT is a new variant of the VRP with time windows, the problem is not easy to solve, and it is also NP-hard. To solve the VRPTWTT, we propose a hybrid method combining Neighborhood search with Ant Colony Optimization Algorithm (ACO). Furthermore, when ACO is close to current optimal solution, neighborhood search is used to maintain the diversity of ACO and explore new solutions. 
First, we present a description of the hybrid method followed by computational results and the conclusion. {\textcopyright} 2005 - 2017 JATIT \& LLS. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010188946\&partnerID=40\&md5=ab0a6c69de17f1da6b246486211ec193}, author = {Abbassi, A. and El Bouyahyiouy, K. and El Hilali Alaoui, A. and Bellabdaoui, A.} } @conference {L{\textquoteright}Amrani2017736, title = {Identity management systems: Laws of identity for models{\textquoteright} evaluation}, booktitle = {Colloquium in Information Science and Technology, CIST}, year = {2017}, note = {cited By 0}, pages = {736-740}, abstract = {The digital identity is the representation of an active entity (person, actor); it is used by most systems to allow access to resources. When users are involved in many domains, they can hardly remember all the authentication credentials needed for every access. There exist many identity management systems that aim to solve the issues related to digital identity. However, problems with the identification process and with cross-domain use still affect digital identity. In this work, we analyze the existing identity management systems against the laws of identity, which provide a roadmap for managing cross-domain identity migration. A comparative study of identity management systems using the laws of identity as evaluation criteria is the main contribution of this work. {\textcopyright} 2016 IEEE.}, doi = {10.1109/CIST.2016.7804984}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010194106\&doi=10.1109\%2fCIST.2016.7804984\&partnerID=40\&md5=ce22c4c9d844e8bb028241fc0debd7b6}, author = {L{\textquoteright}Amrani, H. and Berroukech, B.E. and El Bouzekri El Idrissi, Y. and Ajhoun, R.} } @article {Kerzazi2017247, title = {Knowledge flows within open source software projects: A social network perspective}, journal = {Lecture Notes in Electrical Engineering}, volume = {397}, year = {2017}, note = {cited By 0}, pages = {247-258}, abstract = {Developing software is a knowledge-intensive activity, requiring extensive technical knowledge and awareness. The abstract part of development is the social interactions that drive knowledge flows between contributors, especially for Open Source Software (OSS). This study investigated knowledge sharing and propagation from a social perspective using social network analysis (SNA). We mined and analyzed the issue and review histories of three OSS projects from GitHub. Particular attention has been paid to the socio-interactions through comments from contributors on reviews. We aim at explaining the propagation and density of knowledge flows within contributor networks. The results show that review requests flow from the core contributors toward peripheral contributors, comments on reviews are in a continuous loop from the core teams to the peripherals and back, and the core contributors leverage their awareness and technical knowledge to increase their notoriety by playing the role of communication brokers supported by comments on work items. {\textcopyright} Springer Science+Business Media Singapore 2017.}, doi = {10.1007/978-981-10-1627-1_19}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84996606731\&doi=10.1007\%2f978-981-10-1627-1_19\&partnerID=40\&md5=a2da5970c55b3e81dc2e09bda67e1649}, author = {Kerzazi, N. and El Asri, I.} } @conference {Labghough2017, title = {Majority logic coding schemes performance over FSO channels}, booktitle = {2016 International Conference on Advanced Communication Systems and Information Security, ACOSIS 2016 - Proceedings}, year = {2017}, note = {cited By 0}, abstract = {In this paper, the performance of OSMLD convolutional self-orthogonal codes (CSOC) and OSMLD block codes over the Gamma-Gamma (ΓΓ) atmospheric turbulence channel is studied.
We analyse the efficiency of the channel coding technique for different atmospheric turbulence conditions by modeling the channel with a Gamma-Gamma (ΓΓ) distribution and with BPSK modulation. The results obtained by Monte Carlo simulation compare different coding schemes with different coding rates while varying the effect of atmospheric turbulence. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ACOSIS.2016.7843942}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85015200697\&doi=10.1109\%2fACOSIS.2016.7843942\&partnerID=40\&md5=02e1867fcf077f466dc54bd1c2fa7108}, author = {Labghough, S. and Ayoub, F. and Belkasmi, M.} } @conference {Mrhar2017557, title = {Making MOOCs matter in formal education through a federating environment}, booktitle = {Proceedings of the European Conference on e-Learning, ECEL}, volume = {2010-October}, year = {2017}, note = {cited By 0}, pages = {557-565}, abstract = {With the emergence of open education and MOOCs, the opportunities and contribution of non-formal learning to the acquisition of knowledge and skills have increased. This type of learning, which has the advantage of being voluntary, relying mainly on the learners{\textquoteright} motivation, remains technically invisible to formal learning environments. Findings suggest that formal learning becomes increasingly less adapted to learners{\textquoteright} needs because it does not take into consideration the real learner{\textquoteright}s profile (knowledge, skills, etc.) updated by non-formal learning. In order to bridge formal and non-formal learning, we aim to personalize formal learning by recovering the learner{\textquoteright}s knowledge, abilities and skills which are acquired through non-formal learning (MOOCs). We propose in this paper a federating environment for MOOCs hosted on different platforms (such as Coursera and Open edX). The main objective of this environment is to provide the formal learning environment with a recommender system of MOOCs. The technical aspects of a federating environment of MOOCs (FEM) are presented. FEM is composed of an integration system and a recommender system of MOOCs. The integration system is responsible for integrating data emanating from different heterogeneous MOOC platforms. The recommender system is based on the learners{\textquoteright} profiles and on the pedagogical objectives set by the concerned establishment that integrates the federating environment FEM. FEM also enables establishments to adapt formal learning through the enriched learners{\textquoteright} profiles. {\textcopyright} The Authors, 2017.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85037541912\&partnerID=40\&md5=345a1683d5b625a972e3de33841d44c8}, author = {Mrhar, K. and Zary, N. and Abik, M.} } @article {Benallou2017525, title = {Making the case for defined benefit pension plans self-adjusting steering schemes}, journal = {International Journal of Applied Business and Economic Research}, volume = {15}, number = {4}, year = {2017}, note = {cited By 0}, pages = {525-550}, abstract = {In an economically and politically unstable context with strong demographic shifts and weak pension plan long-term robustness, Self-Adjusting Steering Schemes (SASS) present several advantages including reactivity, relevance, economic fairness, smooth incremental adjustments and protection against political malice.
This article proposes a probabilistic model to illustrate the impact of SASS on overall liability risk mitigation through some simulations on a simplified defined benefit pension plan. It highlights the cost of inaction in pension plan liability management and advocates the systematic implementation of the proposed SASS schemes. The proposed SASS schemes act on several parameters such as retirement start age, pension computation formulae or pension indexation by dynamically changing their value to cope with the materialized risk factors. It concludes with a discussion on the fairness of the proposed risk-sharing approach among the pension plan{\textquoteright}s different stakeholders. {\textcopyright} Serials Publications Pvt. Ltd.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85016281336\&partnerID=40\&md5=58b676ab725a8d765c5d04b28704cd43}, author = {Benallou, O. and Aboulaich, R. and Nadem, Y.} } @conference {Amine20171245, title = {A many-to-many matching game in ultra-dense LTE HetNets}, booktitle = {2017 13th International Wireless Communications and Mobile Computing Conference, IWCMC 2017}, year = {2017}, note = {cited By 0}, pages = {1245-1250}, abstract = {In this work, we focus our study to improving the energy efficiency of mobile cellular users in ultra-dense LTE HetNets. The hyper-dense co-channel deployment of indoor LTE small cell networks (SCNs) within LTE macro cell networks (MCNs) will aggravate the effect of cross-tier interferences caused by the uplink transmissions of macro-indoor users located inside the overlapping zones of small base station (SBS) coverage areas. Hence, degrading the uplink performance at the level of SBSs adopting closed access policy. In order to eliminate the severe cross-tier interferences, each SBS attempts to open the access for macro-indoor users that accept only the SBS with Max-SINR offer. This will lead to network congestion problems in several SCNs. Wherefore, we formulate our problem as a many-to-many matching game. Then, we introduce an algorithm that computes the optimal many-to-many stable matching which consist of assigning each macro-indoor user with multi-homing capabilities to the most suitable set of SBSs and vice versa based on their preference profiles. With regard to the conventional Max-SINR association scheme, our solution can effectively improve the energy efficiency of cellular users. Moreover, it can ensure load balancing in ultra-dense LTE HetNets. {\textcopyright} 2017 IEEE.}, doi = {10.1109/IWCMC.2017.7986463}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85027886532\&doi=10.1109\%2fIWCMC.2017.7986463\&partnerID=40\&md5=a4a95449b5ea20b54454dcf7b7bd1e59}, author = {Amine, M. and Walid, A. and Kobbane, A. and Cherkaoui, S.} } @conference {Azougaghe2017, title = {Many-to-one matching game towards secure virtual machines migration in cloud computing}, booktitle = {2016 International Conference on Advanced Communication Systems and Information Security, ACOSIS 2016 - Proceedings}, year = {2017}, note = {cited By 0}, abstract = {Virtual machine migration represents a new topic of research last years. In this context, we present a new system based on matching game theory to design efficient and practical migration algorithms that work well with huge numbers of VMs and servers. This strategy solves VM migration, resources managements and load balancing problems in cloud computing environment. 
The experimental results show that our algorithm not only obtains multi-resource load balancing performance but also improves the cloud security services and achieves a gain in resource consumption across all servers. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ACOSIS.2016.7843922}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85015191060\&doi=10.1109\%2fACOSIS.2016.7843922\&partnerID=40\&md5=6b59b3295f8598830c0771717fb9a86b}, author = {Azougaghe, A. and Oualhaj, O.A. and Hedabou, M. and Belkasmi, M. and Kobbane, A.} } @conference {Aznabet2017757, title = {Meander-line UHF RFID tag antenna loaded with split ring resonator}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {757-759}, abstract = {In this paper, a new meander-line UHF RFID tag antenna loaded with a split ring resonator is presented. The antenna has a simple structure and low profile with only one layer of FR4 dielectric substrate and metallization. To achieve good impedance matching, the antenna has been loaded with a metamaterial structure (SRR). The obtained impedance bandwidth covers the American band (900-928 MHz). The proposed design provides a quasi-omnidirectional radiation pattern and a radiation efficiency near 90\% in the operating band. The calculated reading range was 4.36 m at the center frequency of the band. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905576}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019056816\&doi=10.1109\%2fICMCS.2016.7905576\&partnerID=40\&md5=b094791a81d82959bf07b08b899c289d}, author = {Aznabet, I. and Ennasar, M. and El Mrabet, O. and Tedjini, S. and Khalladi, M.} } @conference {Chabibi2017, title = {Metamodeling approach for creating an abstract representation of simulation tools concepts}, booktitle = {Proceedings of IEEE/ACS International Conference on Computer Systems and Applications, AICCSA}, year = {2017}, note = {cited By 0}, abstract = {Models have always been adopted in trades implemented in Systems Engineering (SE). Those models go from concrete representations, such as reduced plans or mock-ups, to abstract ones like systems of equations. In this context, SysML became an SE standard because of its capabilities of supporting the specification, analysis, design, verification and validation of a broad range of systems and systems-of-systems. However, SysML descriptive models are insufficient to perform system behavior verifications. This lack can be handled by a simulation process that allows performing experiments on models to eliminate poor design alternatives, and ensures that a preferred alternative meets the stakeholders{\textquoteright} objectives. As design process efficiency is considerably reduced by the fact that both system modeling and simulation tools are often used separately, several research works aim at combining and integrating both approaches in a common framework. This paper proposes a study of common constructs, semantics and modeling methodologies of simulation tools, on the basis of which we define a modeling language that we name Simulation Modeling Language. The latter is intended to bridge the gap between SysML modeling and various simulation tools. {\textcopyright} 2016 IEEE.}, doi = {10.1109/AICCSA.2016.7945702}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85022040665\&doi=10.1109\%2fAICCSA.2016.7945702\&partnerID=40\&md5=d1fe0f6b95968d7d57159b55cefc34da}, author = {Chabibi, B. and Anwar, A.
and Nassar, M.} } @conference {Amraoui2017278, title = {Mobility quantification for MultiPoint Relays selection algorithm in Mobile Ad hoc Networks}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {278-283}, abstract = {In Mobile Ad hoc Networks (MANETs), with Optimized Link State Routing Protocol (OLSR) the mobility concept is an essential element which can result in the evolution of network performances. In this paper, the main objective is to develop an algorithm to improve the MultiPoint Relay (MPR) selection process in such networks. This algorithm is based on the Mobility Rate (MR) which in turn is relied on the relative velocity of nodes. Additionally, in this algorithm, each node keeps a mobility rate record of other nodes. Moreover, this mobility value will be exchanged between nodes using OLSR messages (HELLO and Topology control (TC)). Furthermore, this value will be used as a criterion when a node chooses their MPR set. In addition, the simulation results using Network Simulator 3 (NS3) have shown that the mobility concept could improve network performances in terms of the throughput, packet received, packet loss, packet delivery ratio and packet forwarded. Moreover, and through this paper the proposed algorithm can be used as a functional mobility mechanism to improve network performances in MANETs. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905672}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019077851\&doi=10.1109\%2fICMCS.2016.7905672\&partnerID=40\&md5=72838ebe1c1b4730920daf92d4fc133c}, author = {Amraoui, H. and Habbani, A. and Hajami, A.} } @conference {ElMejjatti201764, title = {Moisture vulnerability of antenna operation in UHF band}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F130526}, year = {2017}, note = {cited By 0}, pages = {64-68}, abstract = {Behavior of the propagation medium in reaction with electromagnetic fields is described by its dielectric permittivity; this electric property is a function of the physical characteristics of the medium and those of the propagating wave. Among the parameters, which considerably affect the dielectric permittivity of a medium, its water content. In fact, when the snow wetness varies from 0 to 50\% in the melting layer, real part of the mixture permittivity varies from 1.47 to 5.86. Similarly, when soil moisture varies from 0 to 40\% real and imaginary parts of effective permittivity of soil, air and water mixture vary from 4.13 to 24.85 and from 0 to 3.44 respectively. This dynamism in the permittivity as a function of the humidity is able to alter the efficiency of the antenna within the application. In this paper, we propose a study to analyze the impact of wetness on the performance of a dipole antenna with central frequency of 900 MHz. The study was carried out by simulating the behavior of the dipole antenna model in Finite Element Method (FEM) full wave simulator with different values of the water content of two propagation mediums: melting layer and soil. Simulation results concerning the resonant frequency, bandwidth, directivity are presented through paragraphs of the paper. {\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3128128.3128138}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030317231\&doi=10.1145\%2f3128128.3128138\&partnerID=40\&md5=e989fe7cf53281bbd496b02834b4ced0}, author = {El Mejjatti, M. and Habbani, A. 
and Essaid, B. and Amraoui, H.} } @conference {ElAmrani20171973, title = {New exact method to solve multi-capacitated location problem using set partitioning formulation}, booktitle = {Proceedings of the International Conference on Industrial Engineering and Operations Management}, year = {2017}, note = {cited By 0}, pages = {1973-1982}, abstract = {In this paper, we present a generalization of the well-known capacitated p-median location problem, called the budget-constrained multi-capacitated location problem (MCLP). This generalization is characterized by allowing each facility to be used with different capacity levels. We consider n customers, m facilities and l capacity levels, and we note that the solution of the MCLP can be represented as a set of disjoint clusters, where each cluster is composed of one facility and a subset of customers. When creating clusters, some constraints must be met, namely level selection and capacity. In this work, we present the new formulation of the MCLP based on set partitioning, then we suggest an adapted solving method, which we call NFF (Nearest Facility First). The NFF approach is used in two ways: as a heuristic, by taking only the first solution found, or as an exact method, by waiting for the execution to finish. Computational results are presented at the end using instances that we created under several difficulty criteria or adapted from p-median instances available in the literature. The NFF method provides very good results for low and medium difficulty instances, but it is less effective for the more complex ones. To remedy this problem, the method will be supplemented by a column generation approach. {\textcopyright} IEOM Society International.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018949470\&partnerID=40\&md5=4b843bd615c5260fc757d470ecea56ca}, author = {El Amrani, M. and Benadada, Y. and Gendron, B.} } @conference {Addi2017, title = {New hard decision decoder of LDPC codes using single bit flipping algorithm}, booktitle = {Proceedings - 2017 International Conference on Wireless Networks and Mobile Communications, WINCOM 2017}, year = {2017}, note = {cited By 0}, abstract = {The Bit-Flipping (BF) algorithm is considered as a hard decoding method for LDPC codes. It is much simpler than probabilistic methods like the Sum Product Algorithm (SPA), and can be efficiently implemented by electronic circuits. In this paper, we propose a new Bit Flipping algorithm for Low-Density Parity-Check (LDPC) codes called Single Bit-Flipping (SBF). Compared to the Gallager Bit-Flipping algorithm, the proposed algorithm employs a criterion for flipping a single, carefully chosen bit. This method eliminates the risk of flipping more than one bit at a time, which can induce additional error bits and propagate the errors to later iterations. We present some results obtained by applying this new method, which provides a gain in performance in comparison to the standard Gallager Bit-Flipping with low complexity. {\textcopyright}2017 IEEE.}, doi = {10.1109/WINCOM.2017.8238191}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041431498\&doi=10.1109\%2fWINCOM.2017.8238191\&partnerID=40\&md5=51139715da4295938142bee17d106a96}, author = {Addi, S. and Berkani, A. and Azouaoui, A.
and Belkasmi, M.} } @article {Patel201792, title = {A nifty collaborative intrusion detection and prevention architecture for Smart Grid ecosystems}, journal = {Computers and Security}, volume = {64}, year = {2017}, note = {cited By 3}, pages = {92-109}, abstract = {Smart Grid (SG) systems are critical, intelligent infrastructure utility services connected through open networks that are potentially susceptible to cyber-attacks with very acute security risks of shutdown, loss of life, and loss of revenue. Traditional intrusion detection systems based on signature and anomaly techniques are no longer sufficient to protect SGs due to their new connectivity and management challenges, the ever-rapidly-evolving masquerades, and cyber criminality levied against them. SGs require cyber-security systems to render them resilient and protected through advanced Intrusion Detection and Prevention System (IDPS) techniques and mechanisms. This paper proposes a smart collaborative advanced IDPS to provide the best possible protection of SGs with a fully distributed management structure that supports the network and host based detections and the prevention of attacks. By facilitating a reliable, scalable, and flexible design, the specific requirements of IDPS for SGs can be more easily met via a fuzzy risk analyzer, an independent and ontology knowledge-based inference engine module. These can work collaboratively by managing functions across multiple IDPS domains. A set of extensive and intensive simulated experiments shows that with its smart advanced components incorporating soft computing machine-learning techniques and a rich ontology knowledge base with fuzzy logic analysis, it detects and prevents intrusions more efficiently.
The multi-faceted results of the simulation also show that the proposed Collaborative Smart IDPS (CSIDPS) system increases the intrusion detection accuracy and decreases the false positive alarms when compared to traditional IDPSs. This is epitomized by the skillful use of the confusion matrix technique for organizing classifiers, visualizing their performance, and assessing their overall behavior. In the final analysis, the CSIDPS architecture is designed toward contributing to de facto norms for SG ecosystems. {\textcopyright} 2016 Elsevier Ltd}, doi = {10.1016/j.cose.2016.07.002}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84994874812\&doi=10.1016\%2fj.cose.2016.07.002\&partnerID=40\&md5=5f6f90e6647d272108664e9c5f43cf5e}, author = {Patel, A. and Alhussian, H. and Pedersen, J.M. and Bounabat, B. and J{\'u}nior, J.C. and Katsikas, S.} } @conference {Yatribi2017, title = {Non-binary cyclic majority-logic decodable codes: An algebraic construction by using Genetic Algorithms}, booktitle = {2016 International Conference on Advanced Communication Systems and Information Security, ACOSIS 2016 - Proceedings}, year = {2017}, note = {cited By 0}, abstract = {In this paper, the construction of non binary cyclic One-Step Majority-Logic decoding codes from the dual domain and idempotents is investigated. This had led us to propose a new design algorithm based on Genetic Algorithms, as an extension to previous works on the binary field. With the proposed algorithm, we were able to obtain long new non-binary cyclic OSMLD codes with high coding rates and good correction capacities. In fact, two powerful properties of the algebraic construction are provided, firstly, the designed codes have their minimal distances dmin and dimensions calculated analyticaly, secondly, they can be decoded with a low-complexity majority-voting decoding scheme. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ACOSIS.2016.7843935}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85015215915\&doi=10.1109\%2fACOSIS.2016.7843935\&partnerID=40\&md5=4e462ad826ef141669b51bb0bf07198c}, author = {Yatribi, A. and Belkasmi, M. and Ayoub, F. and M{\textquoteright}rabet, Z.} } @conference {Zakaria2017, title = {Non-binary Euclidean Geometry codes: Majority Logic Decoding}, booktitle = {2016 International Conference on Advanced Communication Systems and Information Security, ACOSIS 2016 - Proceedings}, year = {2017}, note = {cited By 0}, abstract = {Non-binary One Step Majority Logic decodable (OSMLD) codes have several advantages over their binary counterparts but unfortunately their decoding complexity is significantly challenging. In this paper, we propose two contributions. Our first contribution is to use the Majority-Logic Decoding (MLGD) algorithm for non-binary cyclic OSMLD codes, since it involves only finite field addition and multiplication, in addition to a majority vote, and hence has significantly lower complexity than other decoding algorithms, which seems to be an attractive choice. The second contribution is to use finite geometry codes, even those of prime fields, because they have a large number of orthogonal equations which makes them good candidates for the MLGD algorithm, so we can benefit from its low complexity. We also investigate the power correction of this algorithm and the results are quite satisfying. 
{\textcopyright} 2016 IEEE.}, doi = {10.1109/ACOSIS.2016.7843943}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85015156977\&doi=10.1109\%2fACOSIS.2016.7843943\&partnerID=40\&md5=8b93306a50f5227acb219e0067afb94a}, author = {Zakaria, M. and Fouad, A. and Mostafa, B. and Anouar, Y. and El Abidine Alaoui Ismaili, Z.} } @conference {Bouzbita2017633, title = {A novel based Hidden Markov Model approach for controlling the ACS-TSP evaporation parameter}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {633-638}, abstract = {The aim of this paper is to propose a new method capable of dynamically controlling the evaporation parameter in an Ant Colony System (ACS) using a Hidden Markov Model. The purpose is to improve the performance of ACS by controlling the exploration and exploitation in the search space. To this end, two HMM approaches are proposed. The first is a training method that best suits the observed data of the Hidden Markov Model. The second is a method that dynamically controls the adapted parameter by applying several processes. To test our algorithm we used a set of Travelling Salesman Problem (TSP) instances. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905544}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019108752\&doi=10.1109\%2fICMCS.2016.7905544\&partnerID=40\&md5=2acc848761420a801ab6567757203ac4}, author = {Bouzbita, S. and El Afia, A. and Faizi, R.} } @conference {Bajtaoui2017979, title = {A novel compact CPW OCSRR strucutre for 2.45 GHz rectenna application}, booktitle = {Proceedings of 2016 International Renewable and Sustainable Energy Conference, IRSEC 2016}, year = {2017}, note = {cited By 0}, pages = {979-982}, abstract = {In this paper, a new and compact rectenna is proposed for the application of ISM band at 2.45 GHz using open complementary split ring resonator (OCSRR). The simulated rectifying efficiency and DC voltage were, respectively 57\% and 0.9 V when the input power to the rectifying circuit was 0dBm (1mW). The highest efficiency, 62\%, was achieved at 2.45 GHz for 3dBm input power and for a charge load 1kΩ. {\textcopyright} 2016 IEEE.}, doi = {10.1109/IRSEC.2016.7984042}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85027837617\&doi=10.1109\%2fIRSEC.2016.7984042\&partnerID=40\&md5=ac70fead05a7a1e13d8ee663959766a4}, author = {Bajtaoui, M. and Abraray, A. and El Mrabet, O. and Aznabet, M. and Essaaidi, M.} } @article {Aoutoul2017125, title = {A novel interconnection technique using zero-degree phase shifting microstrip TL for RF QFN package at S-band}, journal = {Progress in Electromagnetics Research Letters}, volume = {67}, year = {2017}, note = {cited By 0}, pages = {125-130}, abstract = {In this paper, we propose a novel interconnection technique for a flip-chip quad flat no-lead (FC QFN) package which can decrease the amount of the transmission line (TL) phase shift. The RF die inputs and outputs (I/O) are connected to the package lead fingers by a small size, 1000 {\textmu}m length, microstrip line having a gap capacitor consisting of staked plates (fingers) where the space in between is filled by a ceramic material of 10.2 dielectric constant value. 
This technique can reduce the effect of transmission line inductance and makes the novel package interconnection behaving as a composite left right handed (CLRH) TL; hence, one can set the TL phase shift to zero degree at the desired operating frequency band (i.e., S-band) by just tuning geometrical and/or physical interconnection structure parameters. {\textcopyright} 2017, Electromagnetics Academy. All rights reserved.}, doi = {10.2528/PIERL17031301}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019546909\&doi=10.2528\%2fPIERL17031301\&partnerID=40\&md5=4be55e2571c969bc93a364b5554f37fc}, author = {Aoutoul, M. and Haddad, A. and Essaaidi, M. and Faqir, M.} } @conference {Moumen2017, title = {A novel text-to-speech synthesizer for processing sms in moroccan Arabic}, booktitle = {Proceedings of International Conference on Computers and Industrial Engineering, CIE}, year = {2017}, note = {cited By 0}, abstract = {This paper proposes a novel text-to-speech (TTS) system that converts text messages (SMS) into speech sequences. This system takes into consideration the characteristics of the Moroccan context, where several languages might be used in the same text message. Therefore, the languages that are analyzed in this work are English, French, Moroccan Arabic and Modern Standard Arabic (MSA). Processing French, English, or Modern Standard Arabic is common and has given satisfying results. However, Moroccan Arabic remains under-studied for many reasons, particularly the lack of data resources given that this Arabic dialect is primarily spoken and non-codified.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85040947811\&partnerID=40\&md5=1ddd4fb5d94d0350d8211d7bc3fc7c61}, author = {Moumen, R. and Chiheb, R. and Zarra, T. and Faizi, R. and El Afia, A.} } @conference {Essadi2017, title = {Operators role-based approach to coordinate between heterogeneous DSLs}, booktitle = {2017 International Conference on Wireless Technologies, Embedded and Intelligent Systems, WITS 2017}, year = {2017}, note = {cited By 0}, abstract = {Nowadays systems involve many business domains and are consequently designed by teams of experts with different concerns. Every team uses its Domain Specific Language (DSL) to elaborate heterogeneous models describing different parts of the same system. These heterogeneous models need to be coordinated and integrated, to get a whole view of the system, to ease its validation and to uncover inconsistencies between heterogeneous models. However, many approaches and ways of integration has been already explored, one of important approach among them is coordination between DSLs instead of models, doing so coordination became automatic instead of being manual and tailored to every instance of models. Actually, this work fits in this class by giving a classification of DSLs relationships and their respective operators used to resolve DSLs heterogeneity. Moreover, in this paper we consider coordination as a separate role to be given to DSLs meta-classes responsible of coordination. Explicitly, these meta-classes need to override operations defined in the proposed coordination meta-model. As illustrative example, we present a telecommunication network supervision system where two different DSL are involved. {\textcopyright} 2017 IEEE.}, doi = {10.1109/WITS.2017.7934657}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85021691244\&doi=10.1109\%2fWITS.2017.7934657\&partnerID=40\&md5=39f75645c1fcfbbea42c9bace8a1bbfd}, author = {Essadi, N. 
and Anwar, A. and Laghouaouta, Y.} } @conference {ElAfia2017, title = {Particle swarm optimization for model selection of aircraft maintenance predictive models}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {Nowadays, predictive models -especially the ones based on machine learning- are widely used to solve many big data problems. One of the main challenges within predictive models is to choose the best model for each problem. In particular, model selection and feature selection are two important issues in machine learning models as they help to achieve the best results. This paper focuses on the restriction of these two problems to σ-SVR (support vector regression) and more specifically the optimization of both problems using the particle swarm optimization algorithm. Our approach is investigated in the estimation of the remaining useful life (RUL) of aircraft, which affects their maintenance planning and which is an interesting issue in predictive maintenance. That is, the experiment consists of predicting the RUL of aircraft engines using a σ-SVR optimized by PSO. Experimental results show the efficiency of the proposed approach. {\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090402}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028474954\&doi=10.1145\%2f3090354.3090402\&partnerID=40\&md5=2d4328ae0f65ce92a35e5516b96bd0b7}, author = {El Afia, A. and Sarhani, M.} } @conference {Sahnoun2017726, title = {Path reliability metric for proactive MANET routing}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {726-730}, abstract = {One critical issue for routing in Mobile Ad-hoc Networks (MANET) is how to select reliable paths that can last as long as possible. The reliability of a path depends on the number of links and the reliability of each link constituting the path. In this paper we developed efficient routing metric prediction methods by predicting link stability using a probabilistic computation based on the normal-like distributions of the link lifetimes in typical MANET mobility scenarios, integrating it into the proactive MANET routing protocol OLSR, named Path Reliability OLSR (PR-OLSR). We show by simulation that PR-OLSR is more adaptive to the network dynamics and is therefore able to improve performance significantly in terms of packet delivery ratio and prolonged network energy lifetime. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905622}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019101373\&doi=10.1109\%2fICMCS.2016.7905622\&partnerID=40\&md5=e6eeb0abb95c8eee7c04dea7ae26a2ac}, author = {Sahnoun, A. and El Abbadi, J. and Habbani, A.} } @article {Elmaroud2017550, title = {Performance analysis of asynchronous and non linear FBMC systems}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {10542 LNCS}, year = {2017}, note = {cited By 0}, pages = {550-561}, abstract = {This paper provides a theoretical analysis of the joint effect of synchronization errors and High Power Amplifiers (HPA) non linear distortions on filter bank based multicarrier (FBMC) systems. A promising class of FBMC modulation called Cosine Modulated Multitone (CMT) will be considered.
For the studied system, analytical expressions of the signal to interference ratio (SIR) and bit error rate (BER) will be derived in the presence of HPA distortions, timing errors and carrier frequency offset (CFO). To this end, we have developed a closed-form expression of interference and useful signal powers using some acceptable approximations. The proposed model is compared with existing models designed for the considered system or for other multicarrier systems like SMT (Staggered Modulated Multitone) or OFDM. {\textcopyright} Springer International Publishing AG 2017.}, doi = {10.1007/978-3-319-68179-5_48}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85034587110\&doi=10.1007\%2f978-3-319-68179-5_48\&partnerID=40\&md5=6d16308c4170297309c0b8c954bb39aa}, author = {Elmaroud, B. and Faqihi, A. and Aboutajdine, D.} } @conference {Illi2017, title = {Performance analysis of dual-hop underwater communication system subject to κ-μ shadowed fading channel}, booktitle = {2016 International Conference on Advanced Communication Systems and Information Security, ACOSIS 2016 - Proceedings}, year = {2017}, note = {cited By 1}, abstract = {In this paper, a unified performance analysis of a dual-hop semi-blind amplify and forward relay over underwater wireless communication system is presented. Both RF links (Source-Relay (S-R) and Relay-Destination (R-D)), are modeled by independent and not necessarily identically distributed k shadowed fading channel. We derive an approximative closed form analytical expression for the cumulative distribution function (CDF) and the probability density function (PDF) of the total end-to-end SNR in terms of Kummer{\textquoteright}s Hypergeometric function. Based on these results, we present upper-bounded closed forms of communication systems performance criterion, such as outage probability and average bit/symbol error rate. All the derived analytical expressions are validated through computer-based simulation. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ACOSIS.2016.7843941}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85015177998\&doi=10.1109\%2fACOSIS.2016.7843941\&partnerID=40\&md5=beec34777a1490ccddd31b41f7105550}, author = {Illi, E. and El Bouanani, F. and Ayoub, F.} } @conference {Illi201785, title = {Performance analysis of mixed RF/FSO communication system with the presence of pointing error using the MGF-based approach}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F130526}, year = {2017}, note = {cited By 0}, pages = {85-92}, abstract = {In this paper, we address a performance analysis of a dual-hop mixed communication system over both radio-frequency (RF) and free-space-optical (FSO) links with the presence of pointing error impairments. Our system model consists of two communication paths, and the receiver uses the MRC combining scheme to combine the received signals through these both links. The first optical link consists of a source (S) and a destination (D) nodes communicating through a relay node (R), which receives the incoming information-bearing signal and forwards it to the destination (i.e. detect-and- forward relaying (Det-F)). Both (S-R) and (R-D) hops are operating under FSO link subject to Malaga-M turbulence model with the presence of pointing error. The second link is a direct RF link, connecting the source (S) directly to the destination (D), and is assumed to be subject to Rayleigh fading channel. 
We present in this paper the exact closed-form expression of the moment-generating function (MGF) of the total end-to-end SNR. Based on this result, we derive successfully the average symbol error rate of our mixed RF/FSO communication system using the MGF-based approach, in terms of either univariate or bivariate Fox{\textquoteright}s H-Function. All these derived analytical results are validated through computer-based simulation. {\textcopyright} 2017 ACM.}, doi = {10.1145/3128128.3128142}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030319379\&doi=10.1145\%2f3128128.3128142\&partnerID=40\&md5=ab876fa37904a4cccc838e58b6d1376e}, author = {Illi, E. and El Bouanani, F. and Ayoub, F.} } @conference {Illi2017, title = {A performance study of a hybrid 5G RF/FSO transmission system}, booktitle = {Proceedings - 2017 International Conference on Wireless Networks and Mobile Communications, WINCOM 2017}, year = {2017}, note = {cited By 0}, abstract = {This work presents a study on the performance of a hybrid 5G RF/FSO communication system. Both transmitter and receiver communicate through two paths: the first link operates under Radio-Frequency (RF) technology, assumed to undergo a Weibull fading channel, while the second link operates under Free Space Optical (FSO) technology, subject to M{\'a}laga-Mturbulence model with the presence of pointing error impairment. Both incoming signals are combined at the receiver side using Maximal Ratio Combining (MRC) technique. We retrieve, in particular, the Moment Generating Function (MGF) of the total Signal-to-Noise Ratio (SNR). Based on that, we present the exact closedform expression of the Average Symbol Error Rate (ASER) of the considered RF/FSO system for different modulations schemes in terms of the bivariate Fox{\textquoteright}s H-Function. {\textcopyright} 2017 IEEE.}, doi = {10.1109/WINCOM.2017.8238167}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041442507\&doi=10.1109\%2fWINCOM.2017.8238167\&partnerID=40\&md5=bd6592962b5b523d17722b84179ab4a4}, author = {Illi, E. and El Bouanani, F. and Ayoub, F.} } @conference {Khaldi2017, title = {Prediction of supplier performance: A novel DEA-ANFIS based approach}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {The focus of this paper is on investigating the feasibility of using ANFIS combined with DEA for supplier{\textquoteright}s post-evaluation. The proposed framework aims at modeling performance measurement, and forecasting of a selected hospital{\textquoteright}s drug suppliers. Even though it is broadly employed as a benchmarking tool to evaluate DMUs efficiency, DEA can hardly be used to predict the performance of unseen DMUs. For this reason, ANFIS model has been integrated to DEA due to its nonlinear mapping, strong generalization capabilities and pattern prediction functionalities. DEA based BCC model is used to evaluate the efficiency scores of a set of suppliers, then ANFIS intervenes to learn DEA patterns and to forecast the performance of new suppliers. The results of this research highlight the prediction power of the proposed model in a new scope. They present it as an efficient benchmarking tool and a promising decision support system applied at the operational level. 
{\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090416}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028459962\&doi=10.1145\%2f3090354.3090416\&partnerID=40\&md5=da01d9720d77f4831205623526bfc9d9}, author = {Khaldi, R. and Chiheb, R. and El Afia, A. and Akaaboune, A. and Faizi, R.} } @article {Niharmine20173087, title = {Recognition of handwritten tifinagh characters using gradient direction features}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {95}, number = {13}, year = {2017}, note = {cited By 0}, pages = {3087-3095}, abstract = {The goal of an Optical Character Recognition system is to convert images of handwritten characters into editable text. Many OCR techniques have been developed by researchers for Latin and Arabic languages. The Amazigh language still has few works in this area. This paper emphasizes a new methodology to recognize Tifinagh characters using zoning gradient features with a high accuracy and recognition rate. The new methodology is based on gradient direction features and artificial neural networks as a classifier. The novelty of the proposed system is its high accuracy and its training time, which is very small compared with other classifiers. {\textcopyright} 2005 {\textendash} ongoing JATIT \& LLS.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85024366252\&partnerID=40\&md5=012b6056b533d3ac46e12de7824b5b98}, author = {Niharmine, L. and Outtaj, B. and Azouaoui, A.} } @conference {ElMaleky2017451, title = {Reconfigurable T-shaped antenna for S-band applications}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {451-455}, abstract = {This paper presents a reconfigurable T-shaped antenna which can operate easily at seven frequencies in the S-band. The switching technique consists of an approach equivalent to a PIN diode: the state of the current distribution is changed by inserting a simple microstrip line at different positions. The simulated and measured prototype frequencies are, respectively, 3.12 GHz, 3.2 GHz, 3.31 GHz, 3.44 GHz, 3.56 GHz, 3.76 GHz, and 4 GHz. The antenna is fabricated on an FR4 substrate with a relative permittivity of 4.4 and a loss tangent of 0.02. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905560}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019076990\&doi=10.1109\%2fICMCS.2016.7905560\&partnerID=40\&md5=1c70f996a2f8b128e94a389883fb7b80}, author = {El Maleky, O. and Ben Abdelouahab, F. and Essaaidi, M. and Ajana, L.} } @conference {Alaoui2017114, title = {Semantic approach for the building of user profile for recommender system}, booktitle = {Proceedings - 2016 Global Summit on Computer and Information Technology, GSCIT 2016}, year = {2017}, note = {cited By 0}, pages = {114-119}, abstract = {The major problem in the use of the Web is that of searching for relevant information that meets the expectations of a user. This problem increases every day, especially with the emergence of web 2.0 or the social web. Our paper, therefore, sets aside the disadvantages of the social web and exploits it to enrich the user profile. {\textcopyright} 2016 IEEE.}, doi = {10.1109/GSCIT.2016.27}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85027454775\&doi=10.1109\%2fGSCIT.2016.27\&partnerID=40\&md5=3af5bd399eb70f7167e5ea8ae78eb4df}, author = {Alaoui, S. and Ajhoun, R.
and El Bouzekri El Idrissi, Y.} } @article {12059907120170107, title = {Sensitivity analysis of FBMC-based multi-cellular networks to synchronization errors and HPA nonlinearities.}, journal = {EURASIP Journal on Advances in Signal Processing}, volume = {2017}, number = {1}, year = {2017}, pages = {1 - 10}, abstract = {In this paper, we study the performance of asynchronous and nonlinear FBMC-based multi-cellular networks. The considered system includes a reference mobile perfectly synchronized with its reference base station (BS) and K interfering BSs. Both synchronization errors and high-power amplifier (HPA) distortions will be considered and a theoretical analysis of the interference signal will be conducted. On the basis of this analysis, we will derive an accurate expression of signal-to-noise-plus-interference ratio (SINR) and bit error rate (BER) in the presence of a frequency-selective channel. In order to reduce the computational complexity of the BER expression, we applied an interesting lemma based on the moment generating function of the interference power. Finally, the proposed model is evaluated through computer simulations which show a high sensitivity of the asynchronous FBMC-based multi-cellular network to HPA nonlinear distortions. [ABSTRACT FROM AUTHOR]}, keywords = {Bit error rate, Cellular neural networks (Computer science), Filter bank multicarrier, HPA NLD, Multi-cellular networks, Signal-to-interference-plus-noise ratio, Synchronization errors}, issn = {16876172}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=120599071\&site=ehost-live}, author = {Elmaroud, Brahim and Faqihi, Ahmed and Aboutajdine, Driss} } @article {Elmaroud2017, title = {Sensitivity analysis of FBMC-based multi-cellular networks to synchronization errors and HPA nonlinearities}, journal = {Eurasip Journal on Advances in Signal Processing}, volume = {2017}, number = {1}, year = {2017}, note = {cited By 0}, abstract = {In this paper, we study the performance of asynchronous and nonlinear FBMC-based multi-cellular networks. The considered system includes a reference mobile perfectly synchronized with its reference base station (BS) and K interfering BSs. Both synchronization errors and high-power amplifier (HPA) distortions will be considered and a theoretical analysis of the interference signal will be conducted. On the basis of this analysis, we will derive an accurate expression of signal-to-noise-plus-interference ratio (SINR) and bit error rate (BER) in the presence of a frequency-selective channel. In order to reduce the computational complexity of the BER expression, we applied an interesting lemma based on the moment generating function of the interference power. Finally, the proposed model is evaluated through computer simulations which show a high sensitivity of the asynchronous FBMC-based multi-cellular network to HPA nonlinear distortions. 
{\textcopyright} 2017, The Author(s).}, doi = {10.1186/s13634-016-0441-0}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85008705000\&doi=10.1186\%2fs13634-016-0441-0\&partnerID=40\&md5=28ba324c5eee82072f7555fc9f1f6df0}, author = {Elmaroud, B.a and Faqihi, A.a b and Aboutajdine, D.a} } @article {Elmaroud2017, title = {Sensitivity analysis of FBMC-based multi-cellular networks to synchronization errors and HPA nonlinearities}, journal = {Eurasip Journal on Advances in Signal Processing}, volume = {2017}, number = {1}, year = {2017}, note = {cited By 1}, abstract = {In this paper, we study the performance of asynchronous and nonlinear FBMC-based multi-cellular networks. The considered system includes a reference mobile perfectly synchronized with its reference base station (BS) and K interfering BSs. Both synchronization errors and high-power amplifier (HPA) distortions will be considered and a theoretical analysis of the interference signal will be conducted. On the basis of this analysis, we will derive an accurate expression of signal-to-noise-plus-interference ratio (SINR) and bit error rate (BER) in the presence of a frequency-selective channel. In order to reduce the computational complexity of the BER expression, we applied an interesting lemma based on the moment generating function of the interference power. Finally, the proposed model is evaluated through computer simulations which show a high sensitivity of the asynchronous FBMC-based multi-cellular network to HPA nonlinear distortions. {\textcopyright} 2017, The Author(s).}, doi = {10.1186/s13634-016-0441-0}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85008705000\&doi=10.1186\%2fs13634-016-0441-0\&partnerID=40\&md5=28ba324c5eee82072f7555fc9f1f6df0}, author = {Elmaroud, B. and Faqihi, A. and Aboutajdine, D.} } @article { ISI:000392177500001, title = {Sensitivity analysis of FBMC-based multi-cellular networks to synchronization errors and HPA nonlinearities}, journal = {EURASIP JOURNAL ON ADVANCES IN SIGNAL PROCESSING}, year = {2017}, month = {JAN 7}, abstract = {In this paper, we study the performance of asynchronous and nonlinear FBMC-based multi-cellular networks. The considered system includes a reference mobile perfectly synchronized with its reference base station (BS) and K interfering BSs. Both synchronization errors and high-power amplifier (HPA) distortions will be considered and a theoretical analysis of the interference signal will be conducted. On the basis of this analysis, we will derive an accurate expression of signal-to-noise-plus-interference ratio (SINR) and bit error rate (BER) in the presence of a frequency-selective channel. In order to reduce the computational complexity of the BER expression, we applied an interesting lemma based on the moment generating function of the interference power. Finally, the proposed model is evaluated through computer simulations which show a high sensitivity of the asynchronous FBMC-based multi-cellular network to HPA nonlinear distortions.}, issn = {1687-6180}, doi = {10.1186/s13634-016-0441-0}, author = {Elmaroud, Brahim and Faqihi, Ahmed and Aboutajdine, Driss} } @conference {ElAfia2017, title = {Supervised learning in branch-and-cut strategies}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {Branch-and-Cut is a powerful algorithm used for solving MILP problems. It involves two main sub-algorithms: branch-and-bound and cutting plane. 
On the one hand, the branch-and-bound algorithm comprises two strategies: a node selection strategy and a branching strategy. In the literature, these two strategies don{\textquoteright}t exploit information from each other, and variable branching strategies try to find a compromise between minimizing the number of processed nodes and minimizing solving time. On the other hand, the cutting plane algorithm allows tightening bounds and reducing the number of processed nodes. Whereas the learning literature has focused on dealing with just one strategy at a time, we design a two-in-one strategy for the branch-and-bound algorithm, given the fact that the two strategies are intuitively dependent. In this perspective, we apply the well-known Support Vector Machine (SVM) algorithm to the well-known MIPLIB set of problems to learn the mentioned strategy, which can be used to speed up the basic branch-and-bound algorithm. We also use cutting planes to speed up the algorithm. {\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090474}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028470798\&doi=10.1145\%2f3090354.3090474\&partnerID=40\&md5=bf88dbeb883a772c23ba54d4a017ef96}, author = {El Afia, A. and Kabbaj, M.M.} } @conference {Aitfares2017113, title = {Suspicious behavior detection of people by monitoring camera}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {113-117}, abstract = {Video analytics is a very challenging area of research in computer vision. Ensuring a high level of security in a public space monitored by a surveillance camera has been a difficult task in recent years. Understanding people{\textquoteright}s behaviors in real time allows surveillance systems to analyze unusual events through the video frames. In this paper, we propose a new approach for detecting suspicious behavior of moving people. We are not interested in simple motion detection of a moving object; rather, we analyze the trajectory of the latter, relying on the object motion vector. Once a suspicious behavior suddenly occurs in this trajectory, we segment and track this object during its motion within the camera{\textquoteright}s field of view. Experiments with real-world images validate the efficiency of the proposed approach. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905601}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019074005\&doi=10.1109\%2fICMCS.2016.7905601\&partnerID=40\&md5=72fbf7b2e7ea117ae6bf2217a9fdc658}, author = {Aitfares, W. and Kobbane, A. and Kriouile, A.} } @conference {Sara2017244, title = {Time aware recommendation}, booktitle = {Proceedings - 6th International Conference on Information and Communication Technology for the Muslim World, ICT4M 2016}, year = {2017}, note = {cited By 1}, pages = {244-247}, abstract = {The overload of information can become a significant challenge in relation to information retrieval systems. Often users will need to carry out extensive research to get the information they desire. This issue will only become more challenging as the quantity of data available on the internet increases. This increase shows no signs of slowing down and inevitably demands better solutions. One such solution proposed in this paper will look at the quality of the service discovery, such as adaptation customizing recommendation. In our project we considered ways to customize the contextual recommendation by creating a time awareness system.
{\textcopyright} 2016 IEEE.}, doi = {10.1109/ICT4M.2016.75}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013052828\&doi=10.1109\%2fICT4M.2016.75\&partnerID=40\&md5=2f395fcbdfe337e52f75ed942f05dffe}, author = {Sara, A. and El Bouzekri El Idrissi, Y. and Ajhoun, R.} } @conference {L{\textquoteright}Amrani2017, title = {Toward interoperability approach between federated systems}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {The main evolution of web services and its exploitation enforce new security challenges, especially in terms of digital identity life cycle management. A set of Identity Management Systems exist to deal with these identities, in order to improve users{\textquoteright} experience and gain secure access. Today we are faced with a large number of heterogeneous identity management approaches. In our study we treated several systems, among those, we present isolated model, centralized model, federated model and user centric model. The federated system makes proof of it eligibility for the identity management, therefore, we were interested in the federated model, which consist on the sharing of digital identity between different security domains , based on an agreement between the entities in communication. The Federated Identity Management (FIM) faces the problem of interoperability between heterogeneous identity federation systems. This study present a use case of interoperability among SAML and WS-Federation. We propose an approach that will permit to inter-operate heterogeneous federation systems and allow the exchange of identity data between them. {\textcopyright} 2017 Copyright held by the owner/author(s).}, doi = {10.1145/3090354.3090391}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028459216\&doi=10.1145\%2f3090354.3090391\&partnerID=40\&md5=e5b2e3571a8e174e206cf216b755135f}, author = {L{\textquoteright}Amrani, H. and Berroukech, B.E. and El Bouzekri El Idrissi, Y. and Ajhoun, R.} } @conference {ElBassiti201799, title = {Towards innovation excellence: Why and how to measure innovation performance?}, booktitle = {Proceedings - 6th International Conference on Information and Communication Technology for the Muslim World, ICT4M 2016}, year = {2017}, note = {cited By 0}, pages = {99-104}, abstract = {To maintain competitive advantage, today organizations need to be able to innovate - not just occasionally, but consistently. Mastering the process of innovation requires identifying the factors that support or hamper the achievement of innovations. The success of such process usually depends on the quality of the best opportunity identified, which is not enough. So, a systematic research and delivery framework spawning a set of performance measurements and improvement metrics is required, because, what is not measurable cannot be neither managed nor improved. This paper identifies three complementary components specifically developed to enable such measurement. First, Innovation Granularity Scales enabling highly targeted yet flexible performance analysis ranging from knowledge assessment to high level progressions and improvements; Second, Innovation Capability Stages referring to the minimum capabilities required by transformational milestones along the innovation continuum; Third, Innovation Maturity Levels representing the quality, predictability and performance within the innovation stages. 
This paper explores these complementary components and presents them as a systematic model underlying a specified innovation performance measurement framework. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICT4M.2016.73}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013042290\&doi=10.1109\%2fICT4M.2016.73\&partnerID=40\&md5=b94e0c3f51b5c6c4d3a7825a33a2bc8e}, author = {El Bassiti, L. and Ajhoun, R.} } @conference {Kabbaj2017621, title = {Towards learning integral strategy of branch and bound}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 1}, pages = {621-626}, abstract = {Branch and bound is the preferred algorithm used for solving MILP problems. It involves two fundamental strategies: a node selection strategy and a branching strategy. Whereas the learning literature has focused on dealing with just one strategy at a time, we design a two-in-one strategy for the branch and bound algorithm, given that the two strategies are intuitively dependent. To do so, we apply the well-known SVM algorithm to the well-known MIPLIB set of problems. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905626}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019169526\&doi=10.1109\%2fICMCS.2016.7905626\&partnerID=40\&md5=8e62cf880e3eed29131a425775a7fa21}, author = {Kabbaj, M.M. and El Afia, A.} } @conference {Ennasar2017465, title = {A UHF RFID tag antenna with improved bandwidth}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2017}, note = {cited By 0}, pages = {465-467}, abstract = {In this paper, a new broadband RFID tag antenna is presented. The antenna has a simple structure and low profile with only one layer of FR4 dielectric substrate and metallization. By choosing proper dimensions for the short dipole and the T-matching network, a good and broadband impedance matching can be achieved. The obtained impedance bandwidth (S11\<-10 dB) across the operating band can reach about 232 MHz (24.57\%) for the UHF band, which is from 860 to 960 MHz. The proposed design has an omnidirectional radiation pattern and a simulated flat read range (almost 4.05 m) over the entire RFID bandwidth. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICMCS.2016.7905571}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019074619\&doi=10.1109\%2fICMCS.2016.7905571\&partnerID=40\&md5=44b596ce0566805925e89ab907dc466b}, author = {Ennasar, M.A. and Essaaidi, M. and Aznabet, I. and El Mrabet, O.} } @article {Hosni20171, title = {On the value of parameter tuning in heterogeneous ensembles effort estimation}, journal = {Soft Computing}, year = {2017}, note = {cited By 0; Article in Press}, pages = {1-34}, abstract = {Accurate software development effort estimation (SDEE) is fundamental for the efficient management of software development projects, as it assists software managers to efficiently manage their human resources. Over the last four decades, while software engineering researchers have used several effort estimation techniques, including those based on statistical and machine learning methods, no consensus has been reached on the technique that can perform best in all circumstances. To tackle this challenge, Ensemble Effort Estimation, which predicts software development effort by combining more than one solo estimation technique, has recently been investigated.
In this paper, heterogeneous ensembles based on four well-known machine learning techniques (K-nearest neighbor, support vector regression, multilayer perceptron and decision trees) were developed and evaluated by investigating the impact of parameter values of the ensemble members on estimation accuracy. In particular, this paper evaluates whether setting ensemble parameters using two optimization techniques (e.g., grid search optimization and particle swarm) permits more accurate estimates of SDEE. The heterogeneous ensembles of this study were built using three combination rules (mean, median and inverse ranked weighted mean) over seven datasets. The results obtained suggest that: (1) Optimized single techniques using grid search or particle swarm optimization provide more accurate estimation; (2) in general ensembles achieve higher accuracy than their single techniques whatever the optimization technique used, even though ensembles do not dominate over all single techniques; (3) heterogeneous ensembles based on optimized single techniques provide more accurate estimation; and (4) generally, particle swarm optimization and grid search techniques generate ensembles with the same predictive capability. {\textcopyright} 2017 Springer-Verlag GmbH Germany, part of Springer Nature}, doi = {10.1007/s00500-017-2945-4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85035807309\&doi=10.1007\%2fs00500-017-2945-4\&partnerID=40\&md5=83d094f56b80a21295cf4230621f445d}, author = {Hosni, M. and Idri, A. and Abran, A. and Nassif, A.B.} } @conference {Annouch2017193, title = {Variable neighborhood search heuristic for the full truckload problem in liquefied petroleum gas supply}, booktitle = {2017 International Colloquium on Logistics and Supply Chain Management: Competitiveness and Innovation in Automobile and Aeronautics Industries, LOGISTIQUA 2017}, year = {2017}, note = {cited By 0}, pages = {193-198}, abstract = {In this paper, we propose a generalized version of the full truckload vehicle routing problem FTVRP in the liquefied petroleum gas (LPG) supply chain. We focus on the supply component that is considered to be the main element of LPG supply chain. The objective consists of determining a schedule of weekly supply which minimizes as well the gas acquisition cost as that of its transport. In order to effectively solve this problem, the mathematical model is established and a neighboring search solution heuristics (VNS) is proposed. The initial solution is generated using a Greedy Algorithm by selecting the most beneficial nodes in terms of transportation cost and gas acquisition cost. In the experiment, different sizes{\textquoteright} problems are used and the numerical results getting with big instance are explored. {\textcopyright} 2017 IEEE.}, doi = {10.1109/LOGISTIQUA.2017.7962897}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85026655256\&doi=10.1109\%2fLOGISTIQUA.2017.7962897\&partnerID=40\&md5=2b385b6056aad92e333bd54bb956201d}, author = {Annouch, A. and Bellabdaoui, A.} } @conference {Alouane2017116, title = {Virtualization in Cloud Computing: Existing solutions and new approach}, booktitle = {Proceedings of 2016 International Conference on Cloud Computing Technologies and Applications, CloudTech 2016}, year = {2017}, note = {cited By 0}, pages = {116-123}, abstract = {Virtualization and virtual environments are fundamental basics for data sharing in Cloud Computing. 
It is beneficial for both the guest user and the provider: while it provides the former with the elements needed to execute his request, it gives the latter the ability to host different guests at no additional cost. The central component in a virtual architecture is called the hypervisor; it holds extra privileges, which enable it to play the fundamental role of managing the sharing of data and resources. The hypervisor has many advantages regarding cost, simplicity of execution, availability... On the other hand, the major role played by this element makes it the perfect target for malicious users aiming to attack the virtual system. In this paper, we will analyze different approaches which try to solve the security issues of hypervisors, and then we will propose our own contribution to increase security in hypervisor-related architectures. {\textcopyright} 2016 IEEE.}, doi = {10.1109/CloudTech.2016.7847687}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013858869\&doi=10.1109\%2fCloudTech.2016.7847687\&partnerID=40\&md5=3848d0b00f65c22e3ac3d36e219d9ee9}, author = {Alouane, M. and El Bakkali, H.} } @conference { ISI:000389502600042, title = {ABAC Model for Collaborative Cloud Services}, booktitle = {Networked Systems, NETYS 2016}, series = {Lecture Notes in Computer Science}, volume = {9944}, year = {2016}, note = {4th International Conference on Networked Systems (NETYS), Marrakech, MOROCCO, MAY 18-20, 2016}, pages = {385}, isbn = {978-3-319-46140-3; 978-3-319-46139-7}, issn = {0302-9743}, author = {Madani, Mohamed Amine and Erradi, Mohammed}, editor = {Abdulla, PA and DelporteGallet, C} } @article {11128948620160201, title = {Accuracy Comparison of Analogy-Based Software Development Effort Estimation Techniques.}, journal = {International Journal of Intelligent Systems}, volume = {31}, number = {2}, year = {2016}, pages = {128 - 152}, abstract = {Estimation by analogy is a commonly used software effort estimation technique and a suitable alternative to other conventional estimation techniques: It predicts the effort of the target project using information from former similar projects. While it is relatively easy to handle numerical attributes, dealing with categorical attributes is one of the most difficult issues for analogy-based estimation techniques. Therefore, we propose, in this paper, a novel analogy-based approach, called 2FA-kprototypes, to predict effort when software projects are described by a mix of numerical and categorical attributes. To this aim, the well-known fuzzy k-prototypes algorithm is integrated into the process of estimation by analogy. The estimation accuracy of 2FA-kprototypes was evaluated and compared with that of two techniques: (1) classical analogy-based technique and (2) 2FA-kmodes, which is a technique that we have developed recently.
The comparison was performed using four data sets that are quite diverse and have different sizes: ISBSG, COCOMO, USP05-FT, and USP05-RQ.}, keywords = {Algorithms, Artificial intelligence, Computer software development, Fuzzy control systems, Numerical analysis, Prototypes}, issn = {08848173}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=111289486\&site=ehost-live}, author = {Idri, Ali and Amazal, Fatima Azzahra and Abran, Alain} } @article { ISI:000365798200003, title = {Accuracy Comparison of Analogy-Based Software Development Effort Estimation Techniques}, journal = {INTERNATIONAL JOURNAL OF INTELLIGENT SYSTEMS}, volume = {31}, number = {2, SI}, year = {2016}, month = {FEB}, pages = {128-152}, abstract = {Estimation by analogy is a commonly used software effort estimation technique and a suitable alternative to other conventional estimation techniques: It predicts the effort of the target project using information from former similar projects. While it is relatively easy to handle numerical attributes, dealing with categorical attributes is one of the most difficult issues for analogy-based estimation techniques. Therefore, we propose, in this paper, a novel analogy-based approach, called 2FA-kprototypes, to predict effort when software projects are described by a mix of numerical and categorical attributes. To this aim, the well-known fuzzy k-prototypes algorithm is integrated into the process of estimation by analogy. The estimation accuracy of 2FA-kprototypes was evaluated and compared with that of two techniques: (1) classical analogy-based technique and (2) 2FA-kmodes, which is a technique that we have developed recently. The comparison was performed using four data sets that are quite diverse and have different sizes: ISBSG, COCOMO, USP05-FT, and USP05-RQ. The results obtained showed that both 2FA-kprototypes and 2FA-kmodes perform better than classical analogy. (C) 2015 Wiley Periodicals, Inc.}, issn = {0884-8173}, doi = {10.1002/int.21748}, author = {Idri, Ali and Amazal, Fatima Azzahra and Abran, Alain} } @article {Elhadjv20163239, title = {An accurate recognizer for basic Arabic sounds}, journal = {ARPN Journal of Engineering and Applied Sciences}, volume = {11}, number = {5}, year = {2016}, note = {cited By 0}, pages = {3239-3243}, abstract = {This paper is part of an ongoing work aiming to build an accurate Arabic sounds recognizer for teaching and learning purposes. Early phases of this work were dedicated to the development of a particular sound database from recitations of the Holy Quran to cover classical Arabic sounds; speech signals of this sound database were manually segmented and labelled on three levels: Word, phoneme, and allophone. Next, two baseline recognizers were built to validate the speech segmentation on both phoneme and allophone levels and also to test the feasibility of the sounds{\textquoteright} recognition intended target. This current phase considers the development of an elaborated recognizer, by considering the basic sounds and looking for their distinctive features (e.g. duration, energy, etc.) to determine which ones will be particularly helpful to identify the phonological variation of the basic sound. Here, we present the first results of the basic sounds recognition obtained so far.
{\textcopyright} 2006-2016 Asian Research Publishing Network (ARPN).}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84961680252\&partnerID=40\&md5=b6050206ac83191694490b10877629ec}, author = {Elhadjv, Y.O.M.a b and Khelifa, M.O.M.c and Abdellah, Y.d and Belkasmi, M.c} } @conference {ElHamlaoui201618, title = {Alignment of viewpoint heterogeneous design models: "Emergency Department" Case Study}, booktitle = {CEUR Workshop Proceedings}, volume = {1731}, year = {2016}, note = {cited By 0}, pages = {18-27}, abstract = {Generally, various models can be used to describe a given application domain on different aspects and thus give rise to several views. To have a complete view of the application domain, heterogeneous models need to be unified, which is a hard task to do. To tackle this problem, we have proposed a method to relate partial models without combining them in a single model. In our approach, partial models are organized as a network of models through a virtual global model called M1C (Model of correspondences between models) which conforms to a ubiquitous language based on a Meta-Model of Correspondences (MMC). This paper presents an application of our method to an "Emergency Department" case study. It has been performed as a collaborative process involving model designers and a supervisor. The focus is put on the building of the M1C model from 3 partial models.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84999098977\&partnerID=40\&md5=f0512c94d99ecc99fcd8fc9336ce1668}, author = {El Hamlaoui, M.a b and Coulette, B.b and Ebersold, S.b and Bennani, S.a and Nassar, M.a and Anwar, A.e and Beugnard, A.c and Bach, J.C.c and Jamoussi, Y.d and Tran, H.N.b} } @article {Ayache2016189, title = {Analysis and verification of XACML policies in a medical cloud environment}, journal = {Scalable Computing}, volume = {17}, number = {3}, year = {2016}, note = {cited By 2}, pages = {189-205}, abstract = {The connectivity of devices, machines and people via Cloud infrastructure can support collaborations among doctors and specialists from different medical organisations. Such collaborations may lead to data sharing and joint tasks and activities. Hence, the collaborating organisations are responsible for managing and protecting data they share. Therefore, they should define a set of access control policies regulating the exchange of data they own. However, existing Cloud services do not offer tools to analyse these policies. In this paper, we propose a Cloud Policy Verification Service (CPVS) for the analysis and the verification of access control policies specified using XACML. The analysis process detects anomalies at two policy levels: a) intra-policy: detects discrepancies between rules within a single security policy (conflicting rules and redundancies), and b) interpolicies: detects anomalies between several security policies such as inconsistency and similarity. The verification process consists in verifying the completeness property which guarantees that each access request is either accepted or denied by the access control policy. In order to demonstrate the efficiency of our method, we also provide the time and space complexities. Finally, we present the implementation of our method and demonstrate how efficiently our approach can detect policy anomalies. 
{\textcopyright} 2016 SCPE.}, doi = {10.12694/scpe.v17i3.1180}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84986332574\&doi=10.12694\%2fscpe.v17i3.1180\&partnerID=40\&md5=86b782c01a2231027ed2e7308e178385}, author = {Ayache, M.a and Erradi, M.a and Khoumsi, A.b and Freisleben, B.c} } @article {11714990120160901, title = {ANALYSIS AND VERIFICATION OF XACML POLICIES IN A MEDICAL CLOUD ENVIRONMENT.}, journal = {Scalable Computing: Practice \& Experience}, volume = {17}, number = {3}, year = {2016}, pages = {189 - 205}, abstract = {The connectivity of devices, machines and people via Cloud infrastructure can support collaborations among doctors and specialists from different medical organisations. Such collaborations may lead to data sharing and joint tasks and activities. Hence, the collaborating organisations are responsible for managing and protecting data they share. Therefore, they should define a set of access control policies regulating the exchange of data they own. However, existing Cloud services do not offer tools to analyse these policies. In this paper, we propose a Cloud Policy Verification Service (CPVS) for the analysis and the verification of access control policies specified using XACML. The analysis process detects anomalies at two policy levels: a) intra-policy: detects discrepancies between rules within a single security policy (conflicting rules and redundancies), and b) interpolicies: detects anomalies between several security policies such as inconsistency and similarity. The verification}, keywords = {Automata, Cloud computing, Completeness, Data security, Formal Verification, Information storage \& retrieval systems {\textendash} Medical care, Security Anomaly Detection, XACML Policies}, issn = {18951767}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=117149901\&site=ehost-live}, author = {Ayache, Meryeme and Erradi, Mohammed and Khoumsi, Ahmed and Freisleben, Bernd} } @article { ISI:000389146000004, title = {ANALYSIS AND VERIFICATION OF XACML POLICIES IN A MEDICAL CLOUD ENVIRONMENT}, journal = {Scalable Computing-Practice and Experience}, volume = {17}, number = {3, SI}, year = {2016}, month = {SEP}, pages = {189-205}, abstract = {The connectivity of devices, machines and people via Cloud infrastructure can support collaborations among doctors and specialists from different medical organisations. Such collaborations may lead to data sharing and joint tasks and activities. Hence, the collaborating organisations are responsible for managing and protecting data they share. Therefore, they should define a set of access control policies regulating the exchange of data they own. However, existing Cloud services do not offer tools to analyse these policies. In this paper, we propose a Cloud Policy Verification Service (CPVS) for the analysis and the verification of access control policies specified using XACML. The analysis process detects anomalies at two policy levels: a) intra-policy: detects discrepancies between rules within a single security policy (conflicting rules and redundancies), and b) inter policies: detects anomalies between several security policies such as inconsistency and similarity. The verification process consists in verifying the completeness property which guarantees that each access request is either accepted or denied by the access control policy. In order to demonstrate the efficiency of our method, we also provide the time and space complexities. 
Finally, we present the implementation of our method and demonstrate how efficiently our approach can detect policy anomalies.}, issn = {1895-1767}, doi = {10.12694/scpe.v17i3.1180}, author = {Ayache, Meryeme and Erradi, Mohammed and Khoumsi, Ahmed and Freisleben, Bernd} } @conference { ISI:000381755100012, title = {Applying Encryption Algorithm for Data Security in Cloud Storage}, booktitle = {ADVANCES IN UBIQUITOUS NETWORKING}, series = {Lecture Notes in Electrical Engineering}, volume = {366}, year = {2016}, note = {International Symposium on Ubiquitous Networking (UNet), Casablanca, MOROCCO, SEP 08-10, 2015}, pages = {141-154}, abstract = {This paper proposes a simple, secure, and privacy-preserving architecture for inter-Cloud data sharing based on an encryption/decryption algorithm which aims to protect the data stored in the cloud from the unauthorized access.}, isbn = {978-981-287-990-5; 978-981-287-989-9}, issn = {1876-1100}, doi = {10.1007/978-981-287-990-5\_12}, author = {Kartit, Zaid and Azougaghe, Ali and Kamal Idrissi, H. and El Marraki, M. and Hedabou, M. and Belkasmi, M. and Kartit, A.}, editor = {Sabir, E and Medromi, H and Sadik, M} } @article {Kartit2016141, title = {Applying encryption algorithm for data security in cloud storage}, journal = {Lecture Notes in Electrical Engineering}, volume = {366}, year = {2016}, note = {cited By 0}, pages = {141-154}, abstract = {This paper proposes a simple, secure, and privacy-preserving architecture for inter-Cloud data sharing based on an encryption/decryption algorithm which aims to protect the data stored in the cloud from the unauthorized access. {\textcopyright} Springer Science+Business Media Singapore 2016.}, doi = {10.1007/978-981-287-990-5_12}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959117395\&doi=10.1007\%2f978-981-287-990-5_12\&partnerID=40\&md5=5e4a7f778a62fb9bb090db998cc47f86}, author = {Kartit, Z.a and Azougaghe, A.b and Idrissi, H.K.a and Marraki, M.E.a and Hedabou, M.c and Belkasmi, M.b and Kartit, A.d} } @article {Khoumsi2016229, title = {An approach to resolve NP-hard problems of firewalls}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {9944 LNCS}, year = {2016}, note = {cited By 0}, pages = {229-243}, abstract = {Firewalls are a common solution to protect information systems from intrusions. In this paper, we apply an automata-based methodology to resolve several NP-Hard problems which have been shown in the literature to be fundamental for the study of firewall security policies. We also compute space and time complexities of our resolution methods. {\textcopyright} Springer International Publishing AG 2016.}, doi = {10.1007/978-3-319-46140-3_19}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84990038429\&doi=10.1007\%2f978-3-319-46140-3_19\&partnerID=40\&md5=cd031e8992745830e08b31671e0e3416}, author = {Khoumsi, A.a and Erradi, M.b and Ayache, M.b and Krombi, W.b} } @conference { ISI:000389502600019, title = {An Approach to Resolve NP-Hard Problems of Firewalls}, booktitle = {Networked Systems, NETYS 2016}, series = {Lecture Notes in Computer Science}, volume = {9944}, year = {2016}, note = {4th International Conference on Networked Systems (NETYS), Marrakech, MOROCCO, MAY 18-20, 2016}, pages = {229-243}, abstract = {Firewalls are a common solution to protect information systems from intrusions. 
In this paper, we apply an automata-based methodology to resolve several NP-Hard problems which have been shown in the literature to be fundamental for the study of firewall security policies. We also compute space and time complexities of our resolution methods.}, isbn = {978-3-319-46140-3; 978-3-319-46139-7}, issn = {0302-9743}, doi = {10.1007/978-3-319-46140-3\_19}, author = {Khoumsi, Ahmed and Erradi, Mohamed and Ayache, Meryeme and Krombi, Wadie}, editor = {Abdulla, PA and DelporteGallet, C} } @conference {Slimani2016266, title = {Artificial neural networks for demand forecasting: Application using Moroccan supermarket data}, booktitle = {International Conference on Intelligent Systems Design and Applications, ISDA}, volume = {2016-June}, year = {2016}, note = {cited By 0}, pages = {266-271}, abstract = {The accuracy of sales forecasts in a supply chain is certainly an important key to competitiveness, because, for any member of the supply chain system, having a clear vision of future demand affects its planning and performance, and thus its profit. In the first study of this work, various Artificial Neural Network models were presented and utilized to predict demand for a customer{\textquoteright}s product. The training and validation data are provided by a known supermarket in Morocco. In a previous study, the results indicated that the best neural network structure for demand forecasting is the Multi Layer Perceptron, which is, by the way, the most commonly used model in the literature. This work focuses on finding the optimal Multi Layer Perceptron structure for demand forecasting. We also present a review of selected works done in the application of game theory and neural networks in the context of management science. The main contribution of our work is the use of neural networks in order to predict the consumer{\textquoteright}s demand and implement this demand forecasting in a two-echelon supply chain with a game theoretic approach. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ISDA.2015.7489236}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978383954\&doi=10.1109\%2fISDA.2015.7489236\&partnerID=40\&md5=e101e792765c4a46c2bf395db5f4b1b1}, author = {Slimani, I.a and El Farissi, I.b and Achchab, S.a} } @conference { ISI:000382672700052, title = {Botched Releases: Do we Need to Roll Back? Empirical Study on a Commercial Web App}, booktitle = {2016 IEEE 23RD INTERNATIONAL CONFERENCE ON SOFTWARE ANALYSIS, EVOLUTION, AND REENGINEERING (SANER), VOL 1}, year = {2016}, note = {23rd IEEE International Conference on Software Analysis, Evolution, and Reengineering (SANER), Osaka, JAPAN, MAR 14-18, 2016}, pages = {574-583}, publisher = {IEEE; IEEE Comp Soc; IPA; Reengineering Forum; Informat Proc Soc Japan, Special Interest Grp Software Engn; Japan Soc Software Sci \& Technol, Special Interest Grp Fdn Software Engn; Osaka Univ, Grad Sch Informat Sci \& Technol; Mitsubishi Elect; Microsoft;}, organization = {IEEE; IEEE Comp Soc; IPA; Reengineering Forum; Informat Proc Soc Japan, Special Interest Grp Software Engn; Japan Soc Software Sci \& Technol, Special Interest Grp Fdn Software Engn; Osaka Univ, Grad Sch Informat Sci \& Technol; Mitsubishi Elect; Microsoft;}, abstract = {A few minutes after a web-based software release, the release team might encounter log traces showing the new system crashing, hanging, or having poor performance.
This is the start of the most nerve-wrecking moments of a product{\textquoteright}s release cycle, i.e., should one run the risk of not doing anything and users losing precious data, or of prematurely engaging the tedious (and costly) roll-back procedure towards the previous release? Thus far, only little attention has been paid by researchers to these so-called {\textquoteleft}{\textquoteleft}botched releases{{\textquoteright}{\textquoteright}}, partly because of lack of release log data. This paper studies 345 releases of a large e-commerce web app over a period of 1.5 years, in which we identified 17 recurrent root causes of botched releases, classified into four major categories. We then build explanatory models to understand which root causes are the most important, and to explore the factors leading to botched releases.}, isbn = {978-1-5090-1855-0}, doi = {10.1109/SANER.2016.114}, author = {Kerzazi, Noureddine and Adams, Bram} } @conference { ISI:000385791400001, title = {Cardiovascular Dysautonomias Diagnosis Using Crisp and Fuzzy Decision Tree: A Comparative Study}, booktitle = {HEALTH INFORMATICS MEETS EHEALTH}, series = {Studies in Health Technology and Informatics}, volume = {223}, year = {2016}, note = {10th eHealth Conference on Predictive Modeling in Healthcare - From Prediction to Prevention, Vienna, AUSTRIA, MAY 24-25, 2016}, pages = {1-8}, abstract = {Decision trees (DTs) are one of the most popular techniques for learning classification systems, especially when it comes to learning from discrete examples. In real world, many data occurred in a fuzzy form. Hence a DT must be able to deal with such fuzzy data. In fact, integrating fuzzy logic when dealing with imprecise and uncertain data allows reducing uncertainty and providing the ability to model fine knowledge details. In this paper, a fuzzy decision tree (FDT) algorithm was applied on a dataset extracted from the ANS (Autonomic Nervous System) unit of the Moroccan university hospital Avicenne. This unit is specialized on performing several dynamic tests to diagnose patients with autonomic disorder and suggest them the appropriate treatment. A set of fuzzy classifiers were generated using FID 3.4. The error rates of the generated FDTs were calculated to measure their performances. Moreover, a comparison between the error rates obtained using crisp and FDTs was carried out and has proved that the results of FDTs were better than those obtained using crisp DTs.}, isbn = {978-1-61499-645-3; 978-1-61499-644-6}, issn = {0926-9630}, doi = {10.3233/978-1-61499-645-3-1}, author = {Kadi, Ilham and Idri, Ali}, editor = {Schreier, G and Ammenwerth, E and Horbst, A and Hayn, D} } @conference {Benali2016, title = {Cloud environment assignment: A context-aware and Dynamic Software Product Lines-based approach}, booktitle = {Proceedings of IEEE/ACS International Conference on Computer Systems and Applications, AICCSA}, volume = {2016-July}, year = {2016}, note = {cited By 0}, abstract = {With the increase in number of mobile devices deployed in cloud computing, the demand of context-aware services to assign increases. Indeed, Information about the user{\textquoteright}s environment exposes new challenges to cloud computing in terms of location-aware, time-aware, device-aware and personalized applications to cope with the constraints of mobile devices in matters of interaction abilities and communication restrictions. In addition, the user also needs context information about services provided by the provider. 
For instance, the user can check the availability of service, the response time of service, the cost, quality of the service and quality of context information. This paper proposes a software framework which supports context-awareness behavior to assign services to Consumers and especially mobile users. This framework is based on Dynamic Software Product Line approach to handle this variability and adaptation in context at runtime. In fact, changes can occur in the application context requiring the cloud environment to be reconfigured, for instance, non-functional requirements like response-time, availability or pricing are violated. {\textcopyright} 2015 IEEE.}, doi = {10.1109/AICCSA.2015.7507225}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84980328145\&doi=10.1109\%2fAICCSA.2015.7507225\&partnerID=40\&md5=79647337f4c5ee7fcb4c6e501857840d}, author = {Benali, A. and El Asri, B. and Kriouile, H.} } @article {CarrilloDeGea2016205, title = {Co-located and distributed natural-language requirements specification: Traditional versus reuse-based techniques}, journal = {Journal of Software: Evolution and Process}, volume = {28}, number = {3}, year = {2016}, note = {cited By 1}, pages = {205-227}, abstract = {{Requirements Engineering (RE) includes processes intended to elicit, analyse, specify and validate systems and software requirements throughout the software life cycle. Mastering the principles of RE is key to achieving the goals of better, cheaper and quicker systems and software development projects. It is also important to be prepared to work with remote teammates, as distributed and global projects are becoming more common. This paper presents an experiment with a total of 31 students from two universities in Spain and Morocco who were assigned to either a co-located or a distributed team. Both traditional and reuse-based requirements specification techniques were applied by the participants to produce requirements documents. Their outcomes were then analysed, and the approaches were compared from the point of view of their effect on a set of performance-based and perception-based variables in co-located and distributed settings. We found significant differences in only productivity (Z = -2.320}, doi = {10.1002/smr.1772}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84977867180\&doi=10.1002\%2fsmr.1772\&partnerID=40\&md5=8d208a58af8dc0f155d3918e16591c20}, author = {Carrillo de Gea, J.M.a and Nicol{\'a}s, J.a and Fern{\'a}ndez Alem{\'a}n, J.L.a and Toval, A.a and Ouhbi, S.b and Idri, A.b} } @article { ISI:000372921700003, title = {Co-located and distributed natural-language requirements specification: traditional versus reuse-based techniques}, journal = {JOURNAL OF SOFTWARE-EVOLUTION AND PROCESS}, volume = {28}, number = {3}, year = {2016}, month = {MAR}, pages = {205-227}, abstract = {{Requirements Engineering (RE) includes processes intended to elicit, analyse, specify and validate systems and software requirements throughout the software life cycle. Mastering the principles of RE is key to achieving the goals of better, cheaper and quicker systems and software development projects. It is also important to be prepared to work with remote teammates, as distributed and global projects are becoming more common. This paper presents an experiment with a total of 31 students from two universities in Spain and Morocco who were assigned to either a co-located or a distributed team.
Both traditional and reuse-based requirements specification techniques were applied by the participants to produce requirements documents. Their outcomes were then analysed, and the approaches were compared from the point of view of their effect on a set of performance-based and perception-based variables in co-located and distributed settings. We found significant differences in only productivity (Z=-2.320}, issn = {2047-7473}, doi = {10.1002/smr.1772}, author = {Carrillo de Gea, Juan M. and Nicolas, Joaquin and Fernandez Aleman, Jose L. and Toval, Ambrosio and Ouhbi, Sofia and Idri, Ali} } @article {11390487420160301, title = {Co-located and distributed natural-language requirements specification: traditional versus reuse-based techniques.}, journal = {Journal of Software: Evolution \& Process}, volume = {28}, number = {3}, year = {2016}, pages = {205 - 227}, abstract = {Requirements Engineering (RE) includes processes intended to elicit, analyse, specify and validate systems and software requirements throughout the software life cycle. Mastering the principles of RE is key to achieving the goals of better, cheaper and quicker systems and software development projects. It is also important to be prepared to work with remote teammates, as distributed and global projects are becoming more common. This paper presents an experiment with a total of 31 students from two universities in Spain and Morocco who were assigned to either a co-located or a distributed team. Both traditional and reuse-based requirements specification techniques were applied by the participants to produce requirements documents. Their outcomes were then analysed, and the approaches were compared from the point of view of their effect on a set of performance-based and perception-based variables in co-located and distributed settings. We found significant differences in only productivi}, keywords = {Computer software, Computer software development, experiment, global software development, internationalisation, Requirements engineering, requirements reuse, requirements specification, Software engineering, software engineering education, Specifications}, issn = {20477473}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=113904874\&site=ehost-live}, author = {de Gea, Juan M. Carrillo and Nicolas, Joaquin and Fernandez Aleman, Jose L. and Toval, Ambrosio and Ouhbi, Sofia and Idri, Ali} } @conference {Berkani2016623, title = {Compact Genetic Algorithms with larger tournament size for soft-decision decoding}, booktitle = {International Conference on Intelligent Systems Design and Applications, ISDA}, volume = {2016-June}, year = {2016}, note = {cited By 0}, pages = {623-628}, abstract = {The standard compact Genetic Algorithm uses competition between two individuals for which we calculate the objective function and the winner will be the one who gives the best value of this function. This work presents a new approach of compact Genetic Algorithm that uses more than just two vectors of competition; it introduces tournament selection with larger size using mutation. We apply this algorithm to solve dual domain soft-decision decoding problem. A performance study of the new decoder will be done and the results will be compared to another compact genetic decoder that uses higher selection pressure with randomly generated individuals. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/ISDA.2015.7489189}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978427919\&doi=10.1109\%2fISDA.2015.7489189\&partnerID=40\&md5=0b021f10df757f48d5d54c2fa5de9db2}, author = {Berkani, A.a and Azouaoui, A.b and Belkasmi, M.a and Aylaj, B.b} } @conference {Rabii201693, title = {Comparison of e-readiness composite indicators}, booktitle = {International Conference on Intelligent Systems Design and Applications, ISDA}, volume = {2016-June}, year = {2016}, note = {cited By 0}, pages = {93-97}, abstract = {The e-readiness is an assessment tool designed to measure and analyse the level of ICT integration in development of country. Several tools of e-readiness using composite indicators approach to calculate the final index that characterizes the purpose of the e-readiness study. Based on selected individual indicators, composites can provide a way to benchmark and gauge the level of the countries{\textquoteright} economic progress. The main goal of this paper is to present a comparative study of the best-known e-readiness composite indicators based on some criteria such as: the estimation of missing data, normalization, weighting, and index calculation. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ISDA.2015.7489207}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978413865\&doi=10.1109\%2fISDA.2015.7489207\&partnerID=40\&md5=52bb4d0700b64cc2c3a5891d658ae5f3}, author = {Rabii, L. and Abdelaziz, D.} } @conference { ISI:000392439200051, title = {Configuration of Daily Demand Predicting System Based on Neural Networks}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {Having a clear vision about future demand is a crucial key to enhance the commercial competitiveness to any efficient supply chain. However, demand forecasting is certainly not an easy task for a manager who had the choice between using traditional forecasting techniques encompassing time series methods, causal methods or simulation methods, or techniques based on artificial intelligence like artificial neural networks (ANNs), fuzzy logic or adaptive neuro fuzzy inference system (ANFIS). This paper focuses on the implementation and configuration of the artificial intelligence of neural networks, and more precisely the multi layer perceptron{\textquoteright}s structure, as a prediction system to produce daily demand forecasts based on historical demand information. The results indicate that adding new inputs to the neural network, in our case study, has a positive impact on the accuracy of the short term demand forecasting. 
In the numerical experimentation, the effectiveness of the proposed model is validated using real-world data from a leading supermarket in Morocco.}, isbn = {978-1-4673-8571-8}, author = {Slimani, Ilham and El Farissi, Ilhame and Achchab, Said}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @conference {Slimani2016, title = {Configuration of daily demand predicting system based on neural networks}, booktitle = {Proceedings of the 3rd IEEE International Conference on Logistics Operations Management, GOL 2016}, year = {2016}, note = {cited By 0}, abstract = {Having a clear vision about future demand is a crucial key to enhance the commercial competitiveness of any efficient supply chain. However, demand forecasting is certainly not an easy task for a manager who has the choice between using traditional forecasting techniques encompassing time series methods, causal methods or simulation methods, or techniques based on artificial intelligence like artificial neural networks (ANNs), fuzzy logic or adaptive neuro fuzzy inference system (ANFIS). This paper focuses on the implementation and configuration of the artificial intelligence of neural networks, and more precisely the multi layer perceptron{\textquoteright}s structure, as a prediction system to produce daily demand forecasts based on historical demand information. The results indicate that adding new inputs to the neural network, in our case study, has a positive impact on the accuracy of the short term demand forecasting. In the numerical experimentation, the effectiveness of the proposed model is validated using real-world data from a leading supermarket in Morocco. {\textcopyright} 2016 IEEE.}, doi = {10.1109/GOL.2016.7731709}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85001948885\&doi=10.1109\%2fGOL.2016.7731709\&partnerID=40\&md5=acdd39511da4d8415453e309226bc8c1}, author = {Slimani, I.a and Farissi, I.E.b and Al-Qualsadi, S.A.a} } @conference { ISI:000389715100001, title = {Dealing with Missing Values in Software Project Datasets: A Systematic Mapping Study}, booktitle = {SOFTWARE ENGINEERING, ARTIFICIAL INTELLIGENCE, NETWORKING AND PARALLEL/DISTRIBUTED COMPUTING}, series = {Studies in Computational Intelligence}, volume = {653}, year = {2016}, note = {17th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD), Shanghai, PEOPLES R CHINA, MAY 30-JUN 01, 2016}, pages = {1-16}, publisher = {IEEE; Int Assoc Comp \& Informat Sci; SSCTL; IEEE Comp Soc; Cent Michigan Univ, Software Engn \& Informat Technol Inst; Shanghai Univ; Shanghai Key Lab Comp Software Testing \& Evaluating}, organization = {IEEE; Int Assoc Comp \& Informat Sci; SSCTL; IEEE Comp Soc; Cent Michigan Univ, Software Engn \& Informat Technol Inst; Shanghai Univ; Shanghai Key Lab Comp Software Testing \& Evaluating}, abstract = {Missing Values (MV) present a serious problem facing research in software engineering (SE) which is mainly based on statistical and/or data mining analysis of SE data. Therefore, various techniques have been developed to deal adequately with MV. In this paper, a systematic mapping study was carried out to summarize the existing techniques dealing with MV in SE datasets and to classify the selected studies according to six classification criteria: research type, research approach, MV technique, MV type, data types and MV objective. Publication channels and trends were also identified.
As results, 35 papers concerning MV treatments of SE data were selected. This study shows an increasing interest in machine learning (ML) techniques especially the K-nearest neighbor algorithm (KNN) to deal with MV in SE datasets and found that most of the MV techniques are used to serve software development effort estimation techniques.}, isbn = {978-3-319-33810-1; 978-3-319-33809-5}, issn = {1860-949X}, doi = {10.1007/978-3-319-33810-1\_1}, author = {Idri, Ali and Abnane, Ibtissam and Abran, Alain}, editor = {Lee, R} } @article {Idri20161, title = {Dealing with missing values in software project datasets: A systematic mapping study}, journal = {Studies in Computational Intelligence}, volume = {653}, year = {2016}, note = {cited By 0}, pages = {1-16}, abstract = {Missing Values (MV) present a serious problem facing research in software engineering (SE) which is mainly based on statistical and/or data mining analysis of SE data. Therefore, various techniques have been developed to deal adequately with MV. In this paper, a systematic mapping study was carried out to summarize the existing techniques dealing with MV in SE datasets and to classify the selected studies according to six classification criteria: research type, research approach, MV technique, MV type, data types and MV objective. Publication channels and trends were also identified. As results, 35 papers concerning MV treatments of SE data were selected. This study shows an increasing interest in machine learning (ML) techniques especially the K-nearest neighbor algorithm (KNN) to deal with MV in SE datasets and found that most of the MV techniques are used to serve software development effort estimation techniques. {\textcopyright} Springer International Publishing Switzerland 2016.}, doi = {10.1007/978-3-319-33810-1_1}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84969232970\&doi=10.1007\%2f978-3-319-33810-1_1\&partnerID=40\&md5=bcf59f4d24258293351f5636660fd78e}, author = {Idri, A.a and Abnane, I.a and Abran, A.b} } @conference { ISI:000389502600039, title = {Deep Neural Networks for Medical Images}, booktitle = {Networked Systems, NETYS 2016}, series = {Lecture Notes in Computer Science}, volume = {9944}, year = {2016}, note = {4th International Conference on Networked Systems (NETYS), Marrakech, MOROCCO, MAY 18-20, 2016}, pages = {382}, isbn = {978-3-319-46140-3; 978-3-319-46139-7}, issn = {0302-9743}, author = {Elaalyani, Issam and Erradi, Mohammed}, editor = {Abdulla, PA and DelporteGallet, C} } @conference {Aylaj2016557, title = {Degeneration simulated annealing algorithm for combinatorial optimization problems}, booktitle = {International Conference on Intelligent Systems Design and Applications, ISDA}, volume = {2016-June}, year = {2016}, note = {cited By 0}, pages = {557-562}, abstract = {In this paper, we use the physical aspect of the simulated annealing method in order to propose a modified simulated annealing algorithm. The main idea of the algorithm is based to find the optimal solution of a combinatorial optimization problem by switching between two different subsystems of treatment; using so called degeneration of atoms energy. This new algorithm is named Degeneration Simulated Annealing (DSA) algorithm. To illustrate the effectiveness of DSA, it is applied to solve the problems of the minimum distance and the decoding, in coding theory. The computational experiment results obtained by DSA are very interesting. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/ISDA.2015.7489177}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978430456\&doi=10.1109\%2fISDA.2015.7489177\&partnerID=40\&md5=0471da6bb951204df77f8f7a184b034d}, author = {Aylaj, B.a and Belkasmi, M.b and Zouaki, H.a and Berkani, A.b} } @article {Assad2016469, title = {On the deployment quality for multi-intrusion detection in wireless sensor networks}, journal = {Lecture Notes in Electrical Engineering}, volume = {381}, year = {2016}, note = {cited By 0}, pages = {469-478}, abstract = {The intrusion detection in a Wireless Sensor Network is defined as a mechanism to monitor and detect any intruder in a sensing area. The sensor deployment quality is a critical issue since it reflects the cost and detection capability of a wireless sensor network. When the random deployment is required, which sensor nodes are uniformly randomly distributed over on surface area, determining the deployment quality becomes challenging. In the intrusion detection application, it is necessary to define more precise measures of sensing range and node density that impact overall system performance. To enhance the detection quality for single/multi intrusion, a probabilistic intrusion detection models are adopted, called single and multi sensing probability detection and the deployment quality issue is surveyed and analysed in term of coverage. {\textcopyright} Springer International Publishing Switzerland 2016.}, doi = {10.1007/978-3-319-30298-0_48}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964066188\&doi=10.1007\%2f978-3-319-30298-0_48\&partnerID=40\&md5=5dca2bc944d09de42dd56334a955161e}, author = {Assad, N.a and Elbhiri, B.b and Faqihi, M.A.c and Ouadou, M.a and Aboutajdine, D.a} } @conference { ISI:000385280000048, title = {On the Deployment Quality for Multi-intrusion Detection in Wireless Sensor Networks}, booktitle = {PROCEEDINGS OF THE MEDITERRANEAN CONFERENCE ON INFORMATION \& COMMUNICATION TECHNOLOGIES 2015 (MEDCT 2015), VOL 2}, series = {Lecture Notes in Electrical Engineering}, volume = {381}, year = {2016}, note = {Mediterranean Conference on Information and Communication Technologies (MedCT), Saidia, MOROCCO, MAY 07-09, 2015}, pages = {469-478}, abstract = {The intrusion detection in a Wireless Sensor Network is defined as a mechanism to monitor and detect any intruder in a sensing area. The sensor deployment quality is a critical issue since it reflects the cost and detection capability of a wireless sensor network. When the random deployment is required, which sensor nodes are uniformly randomly distributed over on surface area, determining the deployment quality becomes challenging. In the intrusion detection application, it is necessary to define more precise measures of sensing range and node density that impact overall system performance. 
To enhance the detection quality for single/multi intrusion, probabilistic intrusion detection models are adopted, called single and multi sensing probability detection, and the deployment quality issue is surveyed and analysed in terms of coverage.}, isbn = {978-3-319-30298-0; 978-3-319-30296-6}, issn = {1876-1100}, doi = {10.1007/978-3-319-30298-0\_48}, author = {Assad, Noureddine and Elbhiri, Brahim and Faqihi, My Ahmed and Ouadou, Mohamed and Aboutajdine, Driss}, editor = {ElOualkadi, A and Choubani, F and ElMoussati, A} } @article {11984480720160715, title = {Dominant Multipoint Relaying Method for Efficient Proactive Routing Schema.}, journal = {Adhoc \& Sensor Wireless Networks}, volume = {33}, number = {1-4}, year = {2016}, pages = {321 - 338}, abstract = {The performance of proactive routing protocol in the context of mobile ad hoc networks (MANET) depends on broadcasting schemas. They are essential to build up an efficient topology knowledge which is required to compute valid routes to any destination inside the network. In this paper, we focus on neighbor knowledge broadcasting especially on multipoint relaying concept. We have modeled the problem of multipoint relaying nodes selection as a set cover problem and customized the domain of feasible solution and the target set to be covered based on greedy approach and neighborhood relation. To assess the performance of our proposal referred to as the Enhanced Dominating Multipoint Relaying (EDMPR) method, we have implemented it in the context of OLSR. Our findings suggest that the proposed method enables broadcasting signaling packets (i.e. topology messages) in a faster manner with a reduced number of broadcasting nodes.}, keywords = {Ad hoc networks (Computer networks), Broadcast storm, Data packets \& packeting, mobile ad hoc network, Multipoint distribution service, Multipoint relaying, neighbor knowledge broadcast, proactive routing, Routing (Computer network management), Self-organizing systems}, issn = {15519899}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=119844807\&site=ehost-live}, author = {Bachir, Bouamoud and Ahmed, Habbani and Zouhair, Guennoun} } @article {Bachir2016321, title = {Dominant multipoint relaying method for efficient proactive routing schema}, journal = {Ad-Hoc and Sensor Wireless Networks}, volume = {33}, number = {1-4}, year = {2016}, note = {cited By 0}, pages = {321-338}, abstract = {The performance of proactive routing protocol in the context of mobile ad hoc networks (MANET) depends on broadcasting schemas. They are essential to build up an efficient topology knowledge which is required to compute valid routes to any destination inside the network. In this paper, we focus on neighbor knowledge broadcasting especially on multipoint relaying concept. We have modeled the problem of multipoint relaying nodes selection as a set cover problem and customized the domain of feasible solution and the target set to be covered based on greedy approach and neighborhood relation. To assess the performance of our proposal referred to as the Enhanced Dominating Multipoint Relaying (EDMPR) method, we have implemented it in the context of OLSR. Our findings suggest that the proposed method enables broadcasting signaling packets (i.e. topology messages) in a faster manner with a reduced number of broadcasting nodes.
{\textcopyright} 2016 Old City Publishing, Inc.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84996743438\&partnerID=40\&md5=4b6e635684dcd4a5121acf22d335fa2a}, author = {Bachir, B.a and Ahmed, H.a b and Zouhair, G.b} } @article { ISI:000389653700013, title = {Dominant Multipoint Relaying Method for Efficient Proactive Routing Schema}, journal = {AD HOC \& SENSOR WIRELESS NETWORKS}, volume = {33}, number = {1-4}, year = {2016}, pages = {321-338}, abstract = {The performance of proactive routing protocol in the context of mobile ad hoc networks (MANET) depends on broadcasting schemas. They are essential to build up an efficient topology knowledge which is required to compute valid routes to any destination inside the network. In this paper, we focus on neighbor knowledge broadcasting especially on multipoint relaying concept. We have modeled the problem of multipoint relaying nodes selection as a set cover problem and customized the domain of feasible solution and the target set to be covered based on greedy approach and neighborhood relation. To assess the performance of our proposal referred to as the Enhanced Dominating Multipoint Relaying (EDMPR) method, we have implemented it in the context of OLSR. Our findings suggest that the proposed method enables broadcasting signaling packets (i.e. topology messages) in a faster manner with a reduced number of broadcasting nodes.}, issn = {1551-9899}, author = {Bachir, Bouamoud and Ahmed, Habbani and Zouhair, Guennoun} } @conference {Azougaghe2016421, title = {An efficient algorithm for data security in Cloud storage}, booktitle = {International Conference on Intelligent Systems Design and Applications, ISDA}, volume = {2016-June}, year = {2016}, note = {cited By 0}, pages = {421-427}, abstract = {Cloud computing has now become a major trend; it is a new data hosting technology that has been very popular in recent years thanks to the amortization of costs it brings to companies. In this paper we present the major security issues in cloud computing and we also propose a simple, secure, and privacy-preserving architecture for inter-Cloud data sharing based on an encryption/decryption algorithm which aims to protect the data stored in the cloud from unauthorized access. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ISDA.2015.7489267}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978374763\&doi=10.1109\%2fISDA.2015.7489267\&partnerID=40\&md5=eb13454b1c8384bc8bc11182c4419af5}, author = {Azougaghe, A.a and Kartit, Z.b and Hedabou, M.a and Belkasmi, M.a and El Marraki, M.b} } @article {Assad2016991, title = {Efficient deployment quality analysis for intrusion detection in wireless sensor networks}, journal = {Wireless Networks}, volume = {22}, number = {3}, year = {2016}, note = {cited By 0}, pages = {991-1006}, abstract = {The intrusion detection in a Wireless Sensor Network is defined as a mechanism to monitor and detect any intruder in a sensing area. The sensor deployment quality is a critical issue since it reflects the cost and detection capability of a wireless sensor network. The quality of deterministic deployment can be determined sufficiently by a rigorous analysis before the deployment. However, when random deployment is required, determining the deployment quality becomes challenging. In the intrusion detection application, it is necessary to define more precise measures of sensing range, transmission range, and node density that impact overall system performance.
The major question is centred around the quality of intrusion detection in WSN, how we can guarantee that each point of the sensing area is covered by at least one sensor node, and what a sufficient condition to guarantee the network connectivity? In this paper, we propose an appropriate probabilistic model which provides the coverage and connectivity in k-sensing detection of a wireless sensor network. We have proved the capability of our approach using a geometric analysis and a probabilistic model. {\textcopyright} 2015, Springer Science+Business Media New York.}, doi = {10.1007/s11276-015-1015-z}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962534941\&doi=10.1007\%2fs11276-015-1015-z\&partnerID=40\&md5=6a1fff96c40ee0d1aa4f74e4d9c1a592}, author = {Assad, N.a and Elbhiri, B.b and Faqihi, M.A.c and Ouadou, M.a and Aboutajdine, D.a} } @article {11387931120160401, title = {Efficient deployment quality analysis for intrusion detection in wireless sensor networks.}, journal = {Wireless Networks (10220038)}, volume = {22}, number = {3}, year = {2016}, pages = {991 - 1006}, abstract = {The intrusion detection in a Wireless Sensor Network is defined as a mechanism to monitor and detect any intruder in a sensing area. The sensor deployment quality is a critical issue since it reflects the cost and detection capability of a wireless sensor network. The quality of deterministic deployment can be determined sufficiently by a rigorous analysis before the deployment. However, when random deployment is required, determining the deployment quality becomes challenging. In the intrusion detection application, it is necessary to define more precise measures of sensing range, transmission range, and node density that impact overall system performance. The major question is centred around the quality of intrusion detection in WSN, how we can guarantee that each point of the sensing area is covered by at least one sensor node, and what a sufficient condition to guarantee the network connectivity? In this paper, we propose an appropriate probabilistic model which provides the cover}, keywords = {Graph theory, Integer programming, Intrusion detection probability, Intrusion detection systems (Computer security), Multiple access protocols (Computer network protocols), Network connectivity, Network coverage, Quality of service, Sensing range, Transmission range, Wireless sensor networks}, issn = {10220038}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=113879311\&site=ehost-live}, author = {Assad, Noureddine and Elbhiri, Brahim and Faqihi, Moulay and Ouadou, Mohamed and Aboutajdine, Driss} } @article { ISI:000372921300019, title = {Efficient deployment quality analysis for intrusion detection in wireless sensor networks}, journal = {WIRELESS NETWORKS}, volume = {22}, number = {3}, year = {2016}, month = {APR}, pages = {991-1006}, abstract = {The intrusion detection in a Wireless Sensor Network is defined as a mechanism to monitor and detect any intruder in a sensing area. The sensor deployment quality is a critical issue since it reflects the cost and detection capability of a wireless sensor network. The quality of deterministic deployment can be determined sufficiently by a rigorous analysis before the deployment. However, when random deployment is required, determining the deployment quality becomes challenging. 
In the intrusion detection application, it is necessary to define more precise measures of sensing range, transmission range, and node density that impact overall system performance. The major question is centred around the quality of intrusion detection in WSN, how we can guarantee that each point of the sensing area is covered by at least one sensor node, and what a sufficient condition to guarantee the network connectivity? In this paper, we propose an appropriate probabilistic model which provides the coverage and connectivity in k-sensing detection of a wireless sensor network. We have proved the capability of our approach using a geometric analysis and a probabilistic model.}, issn = {1022-0038}, doi = {10.1007/s11276-015-1015-z}, author = {Assad, Noureddine and Elbhiri, Brahim and Faqihi, Moulay Ahmed and Ouadou, Mohamed and Aboutajdine, Driss} } @conference {Fath-Allah2016, title = {An E-government portals{\textquoteright} maturity model - Architectural and procedural views}, booktitle = {SITA 2016 - 11th International Conference on Intelligent Systems: Theories and Applications}, year = {2016}, note = {cited By 0}, abstract = {E-government portals are changing the way citizens are dealing with their governments. The e-government services can be executed by citizens without any location or time constraints, which results in great benefits for them. Therefore, agencies should follow worldwide e-government portals{\textquoteright} best practices that would contribute in the portals{\textquoteright} quality. E-government maturity models can be used for this purpose. The purpose of this paper is to propose an e-government portals{\textquoteright} maturity model that is based on a best practice model for e-government portals. This maturity model can help agencies identify their maturity rank by measuring the presence of those best practices. At the same time, the maturity model can provide guidelines and recommendations for agencies to move to an upper stage of maturity. {\textcopyright} 2016 IEEE.}, doi = {10.1109/SITA.2016.7772272}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010369791\&doi=10.1109\%2fSITA.2016.7772272\&partnerID=40\&md5=9042feeee58af2946ca1ad76185edb6a}, author = {Fath-Allah, A.a and Cheikhi, L.a and Al-Qutaish, R.E.b and Idri, A.a} } @conference { ISI:000391420300017, title = {An E-government Portals{\textquoteright} Maturity Model - Architectural and Procedural Views}, booktitle = {2016 11TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, series = {International Conference on Intelligent Systems, Theories and Applications}, year = {2016}, note = {11th International Conference on Intelligent Systems - Theories and Applications (SITA), Mohammedia, MOROCCO, OCT 19-20, 2016}, publisher = {IEEE; Univ Hassan Casablanca; IEEE Morocco Sect; Fac Sci Tech Mohammedia}, organization = {IEEE; Univ Hassan Casablanca; IEEE Morocco Sect; Fac Sci Tech Mohammedia}, abstract = {E-government portals are changing the way citizens are dealing with their governments. The e-government services can be executed by citizens without any location or time constraints, which results in great benefits for them. Therefore, agencies should follow worldwide e-government portals{\textquoteright} best practices that would contribute in the portals{\textquoteright} quality. E-government maturity models can be used for this purpose. 
The purpose of this paper is to propose an e-government portals{\textquoteright} maturity model that is based on a best practice model for e-government portals. This maturity model can help agencies identify their maturity rank by measuring the presence of those best practices. At the same time, the maturity model can provide guidelines and recommendations for agencies to move to an upper stage of maturity.}, isbn = {978-1-5090-5781-8}, issn = {2378-2528}, author = {Fath-Allah, Abdoullah and Cheikhi, Laila and Al-Qutaish, Rafa E. and Idri, Ali} } @conference {Sarhani2016288, title = {Feature selection and parameter optimization of support vector regression for electric load forecasting}, booktitle = {Proceedings of 2016 International Conference on Electrical and Information Technologies, ICEIT 2016}, year = {2016}, note = {cited By 0}, pages = {288-293}, abstract = {Forecasting of future electricity demand has become a promising issue for the electric power industry. Since many factors affect electric load data, machine learning methods are useful for electric load forecasting (ELF). On the one hand, it is important to determine the irrelevant factors as a preprocessing step for ELF. On the other hand, the performance of machine learning models depends heavily on the choice of their parameters. These problems are known respectively as feature selection and model selection problems. In this paper, we use the support vector regression (SVR) model for ELF. Our contribution consists of investigating the use of particle swarm optimization for both feature selection and model selection problems. Experimental results on two widely used electric load datasets show that our proposed hybrid method for feature selection and parameter optimization of SVR can achieve better results when compared with the classical SVR model, both with and without feature selection. {\textcopyright} 2016 IEEE.}, doi = {10.1109/EITech.2016.7519608}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84992187838\&doi=10.1109\%2fEITech.2016.7519608\&partnerID=40\&md5=c467eb46055ce1640930ac5b1ece403b}, author = {Sarhani, M. and El Afia, A.} } @conference { ISI:000391354500050, title = {Feature selection and parameter optimization of support vector regression for electric load forecasting}, booktitle = {2016 INTERNATIONAL CONFERENCE ON ELECTRICAL AND INFORMATION TECHNOLOGIES (ICEIT)}, year = {2016}, note = {2nd International Conference on Electrical and Information Technologies (ICEIT), Tangier, MOROCCO, MAY 04-07, 2016}, pages = {288-293}, abstract = {Forecasting of future electricity demand has become a promising issue for the electric power industry. Since many factors affect electric load data, machine learning methods are useful for electric load forecasting (ELF). On the one hand, it is important to determine the irrelevant factors as a preprocessing step for ELF. On the other hand, the performance of machine learning models depends heavily on the choice of their parameters. These problems are known respectively as feature selection and model selection problems. In this paper, we use the support vector regression (SVR) model for ELF. Our contribution consists of investigating the use of particle swarm optimization for both feature selection and model selection problems.
Experimental results on two widely used electric load dataset show that our proposed hybrid method for feature selection and parameter optimization of SVR can achieve better results when compared with the classical SVR model while using feature selection and without using it.}, isbn = {978-1-4673-8469-8}, author = {Sarhani, Malek and El Afia, Abdellatif}, editor = {Essaaidi, M and ElHani, S} } @conference {Amrani2016, title = {Generalization of capacitated p-median location problem: Modeling and resolution}, booktitle = {Proceedings of the 3rd IEEE International Conference on Logistics Operations Management, GOL 2016}, year = {2016}, note = {cited By 0}, abstract = {The capacitated p-median location problem (CPMP) is very famous in literature and widely used within industry scope. However, in some cases, this location problem variant has poor management of capacity resources. In fact, the capacity used by facilities is fixed and not dependent on customers{\textquoteright} demands. The budget constraint Multi-Capacitated Location Problem (MCLP), considered in that paper, is a generalization of the CPMP problem, it is characterized by allowing each facility to be open with different capacities. In this paper, we will discuss the mathematical modeling of the MCLP problem, then we suggest adapted solving methods. To do this, we propose to solve the MCLP problem using Branch and Cut method. This exact solving method well-known, will serve us to test and validate our new problem formulation. Then we will build one heuristic algorithm, well adapted to our problem, it will be called GCDF (Greatest Customer Demand First). For improving solution quality, the LNS method will complete the GCDF. Computational results are presented at the end using instances that we have created under some criteria of difficulties or adapted from those of p-median problems available in literature. The GCDF (GCDF improved) algorithm is fast and provides good results for most degree of difficulty instances, but it is unreliable for very specific cases. To remedy this problem, the method must start with a basic feasible solution determined by one of the reliable method such as Branch and Bound. {\textcopyright} 2016 IEEE.}, doi = {10.1109/GOL.2016.7731674}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85002428361\&doi=10.1109\%2fGOL.2016.7731674\&partnerID=40\&md5=22a51daec1a55776484f33f5ea5ed3a0}, author = {Amrani, M.E.a and Benadada, Y.a and Gendron, B.b} } @conference { ISI:000392439200016, title = {Generalization of capacitated p-median location problem: modeling and resolution}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {The capacitated p-median location problem (CPMP) is very famous in literature and widely used within industry scope. However, in some cases, this location problem variant has poor management of capacity resources. In fact, the capacity used by facilities is fixed and not dependent on customers{\textquoteright} demands. 
The budget constraint Multi-Capacitated Location Problem (MCLP), considered in that paper, is a generalization of the CPMP problem, it is characterized by allowing each facility to be open with different capacities. In this paper, we will discuss the mathematical modeling of the MCLP problem, then we suggest adapted solving methods. To do this, we propose to solve the MCLP problem using Branch and Cut method. This exact solving method well-known, will serve us to test and validate our new problem formulation. Then we will build one heuristic algorithm, well adapted to our problem, it will be called GCDF (Greatest Customer Demand First). For improving solution quality, the LNS method will complete the GCDF. Computational results are presented at the end using instances that we have created under some criteria of difficulties or adapted from those of p-median problems available in literature. The GCDF{*} (GCDF improved) algorithm is fast and provides good results for most degree of difficulty instances, but it is unreliable for very specific cases. To remedy this problem, the method must start with a basic feasible solution determined by one of the reliable method such as Branch and Bound.}, isbn = {978-1-4673-8571-8}, author = {El Amrani, Mohammed and Benadada, Youssef and Gendron, Bernard}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @article { ISI:000391110500010, title = {Good Quasi-Cyclic Codes from Circulant Matrices Concatenation using a Heuristic Method}, journal = {INTERNATIONAL JOURNAL OF ADVANCED COMPUTER SCIENCE AND APPLICATIONS}, volume = {7}, number = {9}, year = {2016}, month = {SEP}, pages = {63-68}, abstract = {In this paper we present a method to search q circulant matrices; the concatenation of these circulant matrices with circulant identity matrix generates quasi-cyclic codes with high various code rate q/(q+1) (q an integer). This method searches circulant matrices in order to find the good quasi-cyclic code (QCC) having the largest minimum distance. A modified simulated annealing algorithm is used as an evaluator tool of the minimum distance of the obtained QCC codes. Based on this method we found 16 good quasi-cyclic codes with rates (1/2, 2/3 and 3/4), their estimated minimum distance reaches the lower bounds of codes considered to be the better linear block codes in Brouwer{\textquoteright}s database.}, issn = {2158-107X}, author = {Aylaj, Bouchaib and Belkasmi, Mostafa and Nouh, Said and Zouaki, Hamid} } @conference { ISI:000389502600020, title = {Hybrid Encryption Approach Using Dynamic Key Generation and Symmetric Key Algorithm for RFID Systems}, booktitle = {Networked Systems, NETYS 2016}, series = {Lecture Notes in Computer Science}, volume = {9944}, year = {2016}, note = {4th International Conference on Networked Systems (NETYS), Marrakech, MOROCCO, MAY 18-20, 2016}, pages = {244-249}, abstract = {The security of RFID systems become an important subject especially for low cost RFID tags. A lot of Cryptographic algorithms were proposed to insure the security and in the same time meet the resource limitations. In this paper, we proposed a hybrid cryptographic approach as symmetric key encryption technique which generate the key dynamically, together with integrity check parameters. The generation of key stream follows the chained approach, beginning from the initial key pre-shared. 
As a result, the computational complexity will be reduced as well as increase performance.}, isbn = {978-3-319-46140-3; 978-3-319-46139-7}, issn = {0302-9743}, doi = {10.1007/978-3-319-46140-3\_20}, author = {Labbi, Zouheir and Maarof, Ahmed and Senhadji, Mohamed and Belkasmi, Mostafa}, editor = {Abdulla, PA and DelporteGallet, C} } @conference { ISI:000389502600037, title = {Hybrid Homomorphic Encryption for Cloud Privacy}, booktitle = {Networked Systems, NETYS 2016}, series = {Lecture Notes in Computer Science}, volume = {9944}, year = {2016}, note = {4th International Conference on Networked Systems (NETYS), Marrakech, MOROCCO, MAY 18-20, 2016}, pages = {380}, isbn = {978-3-319-46140-3; 978-3-319-46139-7}, issn = {0302-9743}, author = {Bensitel, Yasmina and Rahal, Romadi}, editor = {Abdulla, PA and DelporteGallet, C} } @article { ISI:000392285600072, title = {Improved estimation of software development effort using Classical and Fuzzy Analogy ensembles}, journal = {APPLIED SOFT COMPUTING}, volume = {49}, year = {2016}, month = {DEC}, pages = {990-1019}, abstract = {Delivering an accurate estimate of software development effort plays a decisive role in successful management of a software project. Therefore, several effort estimation techniques have been proposed including analogy based techniques. However, despite the large number of proposed techniques, none has outperformed the others in all circumstances and previous studies have recommended generating estimation from ensembles of various single techniques rather than using only one solo technique. Hence, this paper proposes two types of homogeneous ensembles based on single Classical Analogy or single Fuzzy Analogy for the first time. To evaluate this proposal, we conducted an empirical study with 100/60 variants of Classical/Fuzzy Analogy techniques respectively. These variants were assessed using standardized accuracy and effect size criteria over seven datasets. Thereafter, these variants were clustered using the Scott-Knott statistical test and ranked using four unbiased errors measures. Moreover, three linear combiners were used to combine the single estimates. The results show that there is no best single Classical/Fuzzy Analogy technique across all datasets, and the constructed ensembles (Classical/Fuzzy Analogy ensembles) are often ranked first and their performances are, in general, higher than the single techniques. Furthermore, Fuzzy Analogy ensembles achieve better performance than Classical Analogy ensembles and there is no best Classical/Fuzzy ensemble across all datasets and no evidence concerning the best combiner. (C) 2016 Elsevier B.V. All rights reserved.}, issn = {1568-4946}, doi = {10.1016/j.asoc.2016.08.012}, author = {Idri, Ali and Hosni, Mohamed and Abran, Alain} } @article {Idri2016990, title = {Improved estimation of software development effort using Classical and Fuzzy Analogy ensembles}, journal = {Applied Soft Computing Journal}, volume = {49}, year = {2016}, note = {cited By 1}, pages = {990-1019}, abstract = {Delivering an accurate estimate of software development effort plays a decisive role in successful management of a software project. Therefore, several effort estimation techniques have been proposed including analogy based techniques. However, despite the large number of proposed techniques, none has outperformed the others in all circumstances and previous studies have recommended generating estimation from ensembles of various single techniques rather than using only one solo technique. 
Hence, this paper proposes two types of homogeneous ensembles based on single Classical Analogy or single Fuzzy Analogy for the first time. To evaluate this proposal, we conducted an empirical study with 100/60 variants of Classical/Fuzzy Analogy techniques respectively. These variants were assessed using standardized accuracy and effect size criteria over seven datasets. Thereafter, these variants were clustered using the Scott-Knott statistical test and ranked using four unbiased errors measures. Moreover, three linear combiners were used to combine the single estimates. The results show that there is no best single Classical/Fuzzy Analogy technique across all datasets, and the constructed ensembles (Classical/Fuzzy Analogy ensembles) are often ranked first and their performances are, in general, higher than the single techniques. Furthermore, Fuzzy Analogy ensembles achieve better performance than Classical Analogy ensembles and there is no best Classical/Fuzzy ensemble across all datasets and no evidence concerning the best combiner. {\textcopyright} 2016 Elsevier B.V.}, doi = {10.1016/j.asoc.2016.08.012}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84997417515\&doi=10.1016\%2fj.asoc.2016.08.012\&partnerID=40\&md5=b29d5f00b8c137b8fef1ba5d0c2a68a1}, author = {Idri, A.a and Hosni, M.a and Abran, A.b} } @conference { ISI:000382702400019, title = {Increasing network lifetime by energy-efficient routing scheme for OLSR protocol}, booktitle = {2016 INTERNATIONAL CONFERENCE ON INDUSTRIAL INFORMATICS AND COMPUTER SYSTEMS (CIICS)}, year = {2016}, note = {2nd IEEE International Conference on Industrial Informatics and Computer Systems (CIICS), Sharjah, U ARAB EMIRATES, MAR 13-15, 2016}, publisher = {IEEE}, organization = {IEEE}, abstract = {One of the main considerations in designing routing protocols for Mobile Ad-Hoc Network (MANET) is to increase network lifetime by minimizing nodes{\textquoteright} energy consumption, since nodes are typically battery powered. Many proposals have been addressed to this problem; however, few papers consider a proactive protocol like Optimized Link State Routing Protocol (OLSR) to better manage the energy consumption. Some of them have explored modifications to the MPRs selection mechanism, whereas others have investigated multiple cross layer parameters to increase the network lifetime. In this paper, we explored both modification to MPR selection and integrating appropriate routing metrics in the routing decision scheme to lessen effects of reason that lead to more energy consumption. Our power-aware version of OLSR is proven by simulations in NS3 under a range of different mobile scenarios. Significant performance gains of 20\% are obtained in network lifetime for our modified OLSR and little to no performance gains in term of Packet Delivery Ratio (PDR).}, isbn = {978-1-4673-8743-9}, author = {Sahnoun, Abdelkabir and El Abbadi, Jamal and Habbani, Ahmed} } @conference {Sahnoun2016, title = {Increasing network lifetime by energy-efficient routing scheme for OLSR protocol}, booktitle = {2016 International Conference on Industrial Informatics and Computer Systems, CIICS 2016}, year = {2016}, note = {cited By 0}, abstract = {One of the main considerations in designing routing protocols for Mobile Ad-Hoc Network (MANET) is to increase network lifetime by minimizing nodes{\textquoteright} energy consumption, since nodes are typically battery powered. 
Many proposals have addressed this problem; however, few papers consider a proactive protocol like Optimized Link State Routing Protocol (OLSR) to better manage the energy consumption. Some of them have explored modifications to the MPRs selection mechanism, whereas others have investigated multiple cross layer parameters to increase the network lifetime. In this paper, we explored both modifications to MPR selection and the integration of appropriate routing metrics into the routing decision scheme to lessen the effects of factors that lead to higher energy consumption. Our power-aware version of OLSR is validated by simulations in NS3 under a range of different mobile scenarios. Significant performance gains of 20\% are obtained in network lifetime for our modified OLSR and little to no performance gains in terms of Packet Delivery Ratio (PDR). {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICCSII.2016.7462412}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84969508612\&doi=10.1109\%2fICCSII.2016.7462412\&partnerID=40\&md5=79f4d2f9a4b515d335715ca23943ceac}, author = {Sahnoun, A.a and Abbadi, J.E.a and Habbani, A.b} } @conference { ISI:000390313000030, title = {Integrating SysML with simulation environments (Simulink) by model transformation approach}, booktitle = {2016 IEEE 25TH INTERNATIONAL CONFERENCE ON ENABLING TECHNOLOGIES: INFRASTRUCTURE FOR COLLABORATIVE ENTERPRISES (WETICE)}, year = {2016}, note = {25th IEEE International Conference on Enabling Technologies - Infrastructure for Collaborative Enterprises (WETICE), Paris, FRANCE, JUN 13-15, 2016}, pages = {148-150}, publisher = {IEEE; IEEE Comp Soc Techl Council Software Engn; West Virginia Univ, CERC; Paris Sch Business; CNRS Samovar Res Lab; Telecom SudParis, Inst Mines Telecom; Paris Sch Business Res Lab; Olab Dynam; eFrei Groupe}, organization = {IEEE; IEEE Comp Soc Techl Council Software Engn; West Virginia Univ, CERC; Paris Sch Business; CNRS Samovar Res Lab; Telecom SudParis, Inst Mines Telecom; Paris Sch Business Res Lab; Olab Dynam; eFrei Groupe}, abstract = {In system-level design, descriptive system models seem to be insufficient in order to perform a system verification which fulfils various stakeholders{\textquoteright} requirements. This fact is accentuated by the increasing complexity of system engineering projects and, as a consequence, the difficulties to deal with both their coordination and traceability. Even if SysML (System Modeling Language) is considered as a flexible and standard tool for system engineering, using only descriptive models is insufficient for system behavior verifications. To deal with this concern, simulation environments (i.e. MATLAB/Simulink) allow verifying if the system preliminary design satisfies requirements or not. As a consequence, various research works have been centered on combining the potential of both SysML modeling and simulation tools. This paper proposes an integration approach based on metamodeling and model transformations to generate Simulink models from SysML diagrams. 
This approach is handled by models and modern techniques of MDE (Model-Driven Engineering).}, isbn = {978-1-5090-1663-1}, doi = {10.1109/WETICE.2016.39}, author = {Chabibi, Bassim and Douche, Abdelilah and Anwar, Adil and Nassar, Mahmoud}, editor = {Reddy, SM and Gaaloul, W} } @conference {Chabibi2016148, title = {Integrating SysML with simulation environments (Simulink) by model transformation approach}, booktitle = {Proceedings - 25th IEEE International Conference on Enabling Technologies: Infrastructure for Collaborative Enterprises, WETICE 2016}, year = {2016}, note = {cited By 0}, pages = {148-150}, abstract = {In system-level design, descriptive system models seem to be insufficient in order to perform a system verification which fulfils various stakeholders{\textquoteright} requirements. This fact is accentuated by the increasing complexity of system engineering projects and, as a consequence, the difficulties to deal with both their coordination and traceability. Even if SysML (System Modeling Language) is considered as a flexible and standard tool for system engineering, using only descriptive models is insufficient for system behavior verifications. To deal with this concern, simulation environments (i.e. MATLAB/Simulink) allow verifying if the system preliminary design satisfies requirements or not. As a consequence, various research works have been centered on combining the potential of both SysML modeling and simulation tools. This paper proposes an integration approach based on metamodeling and model transformations to generate Simulink models from SysML diagrams. This approach is handled by models and modern techniques of MDE (Model-Driven Engineering). {\textcopyright} 2016 IEEE.}, doi = {10.1109/WETICE.2016.39}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84983801501\&doi=10.1109\%2fWETICE.2016.39\&partnerID=40\&md5=52ebc18a65b73155173099045a8b3ee2}, author = {Chabibi, B.a and Douche, A.b and Anwar, A.b and Nassar, M.a} } @conference {Amarouche20162876, title = {Introduction to competitive intelligence: Process, applications and tools}, booktitle = {Proceedings of the 27th International Business Information Management Association Conference - Innovation Management and Education Excellence Vision 2020: From Regional Development Sustainability to Global Economic Growth, IBIMA 2016}, year = {2016}, note = {cited By 0}, pages = {2876-2885}, abstract = {In the world of excessive business competitiveness, almost every company tries to monitor its environment to exceed the competitors. Getting knowledge about competitors is the basic principle of what is called Competitive Intelligence (CI). Many applications of Competitive Intelligence can be used like Opinion Mining and Foresight studies and the process of obtaining such intelligence differs according to the company{\textquoteright}s needs. In this paper, we will present in more detail the definitions of CI and a general process grouping the most used steps in conducting such a study. In the end we will present some tools useful in CI. Copyright {\textcopyright} 2016 International Business Information Management Association}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84984646253\&partnerID=40\&md5=1c66c097dc1b53d274daa126d36a743f}, author = {Amarouche, K. and El Akrouchi, M. and Benbrahim, H. 
and Kassou, I.} } @conference { ISI:000381172301104, title = {Introduction to Competitive Intelligence: Process, Applications and Tools}, booktitle = {INNOVATION MANAGEMENT AND EDUCATION EXCELLENCE VISION 2020: FROM REGIONAL DEVELOPMENT SUSTAINABILITY TO GLOBAL ECONOMIC GROWTH, VOLS I - VI}, year = {2016}, note = {27th International Business Information Management Association Conference, Milan, ITALY, MAY 04-05, 2016}, pages = {2876-2885}, abstract = {In the world of excessive business competitiveness, almost every company tries to monitor its environment to exceed the competitors. Getting knowledge about competitors is the basic principal of what is called Competitive Intelligence (CI). Many applications of Competitive Intelligence can be used like Opinion Mining and Foresight studies and the process of obtaining such intelligence differs according to the company{\textquoteright}s needs. In this paper, we will present in more details the definitions of CI and a general process grouping the most used steps in conducting such study. In the end we will present some tools useful in CI.}, isbn = {978-0-9860419-6-9}, author = {Amarouche, Kamal and El Akrouchi, Manal and Benbrahim, Houda and Kassou, Ismail}, editor = {Soliman, KS} } @article {Aoun2016347, title = {Investigation of hidden markov model for the tuning of metaheuristics in airline scheduling problems}, journal = {IFAC-PapersOnLine}, volume = {49}, number = {3}, year = {2016}, note = {cited By 0}, pages = {347-352}, abstract = {The tuning approach consists in finding the most suitable configuration of an algorithm for solving a given problem. Machine learning methods are usually used to automate this process. They may enable to construct robust autonomous artifacts whose behavior becomes increasingly expert. This paper focuses on the restriction of this general problem to the field of air planning and more specifically the crew scheduling problem. Metaheuristics are widely used to solve this problem. Our approach consists of using hidden markov model to find the best configuration of the algorithm based on the estimation of the most likely state. The experiment consists of finding the best parameter values of the particle swarm optimization algorithm for the crew scheduling problem. Our approach has shown that it can be a promising solution for automatic optimization of airline scheduling problems. {\textcopyright} 2016}, doi = {10.1016/j.ifacol.2016.07.058}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84991108851\&doi=10.1016\%2fj.ifacol.2016.07.058\&partnerID=40\&md5=1a1629bc31766ee8816bba42989cb03d}, author = {Aoun, O. and Sarhani, M. and El Afia, A.} } @conference { ISI:000389806000017, title = {Knowledge Management Performance Measurement: A Generic Framework}, booktitle = {DATABASES AND INFORMATION SYSTEMS, DB\&IS 2016}, series = {Communications in Computer and Information Science}, volume = {615}, year = {2016}, note = {12th International Baltic Conference on Databases and Information Systems (DB and IS), Riga, LATVIA, JUL 04-06, 2016}, pages = {242-254}, publisher = {Univ Latvia, Fac Comp; Exigen Serv Latvia; IEEE; Springer}, organization = {Univ Latvia, Fac Comp; Exigen Serv Latvia; IEEE; Springer}, abstract = {This theoretical article aims to propose a generic framework for measuring performance of Knowledge Management (KM) projects based on critical literature review. 
The proposed framework fills the existing gap on KM performance measurement in two points: (i) it provides a generic tool that is able to assess all kinds of KM project as well as the overall organization KM, (ii) it assesses KM projects according to KM objectives in a generic manner. Our framework (GKMPM) relies on a process reference model that provides a KM common understanding in a process based view. It is based on a goal-oriented measurement approach and considers that KM performance dimensions are stakeholder{\textquoteright}s objectives. The framework application follows a procedural approach that begins with the KM project modelling, followed by the objectives prioritization. The next step consists of collecting and analysing data for pre-designed measures, and produces a set of key performance indicators (KPIs) related to the KM project processes and in accordance with its objectives.}, isbn = {978-3-319-40180-5; 978-3-319-40179-9}, issn = {1865-0929}, doi = {10.1007/978-3-319-40180-5\_17}, author = {Oufkir, Latifa and Fredj, Mounia and Kassou, Ismail}, editor = {Arnicans, G and Arnicane, V and Borzovs, J and Niedrite, L} } @conference {Annouch2016, title = {A literature review on the full trackload vehicle routing problems}, booktitle = {Proceedings of the 3rd IEEE International Conference on Logistics Operations Management, GOL 2016}, year = {2016}, note = {cited By 0}, abstract = {This article presents a literature review of the full truckload vehicle routing problem (FTVRP). In this problem, each vehicle can serve only one order at a time; thereafter, the delivery must be completed before the next request. Our review has considered some papers published between 1983 and 2015 and their classification was based on two main areas: First, the issue discussed with encountered business constraints and the obvious applications in industry. Second, the solution methods, both exact and approached (heuristics and metaheuristics). Some lines for further works are presented as well. {\textcopyright} 2016 IEEE.}, doi = {10.1109/GOL.2016.7731723}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85002412779\&doi=10.1109\%2fGOL.2016.7731723\&partnerID=40\&md5=9ebb4126ca35885c1a32d508e792ef9d}, author = {Annouch, A. and Bouyahyaoui, K. and Bellabdaoui, A.} } @conference { ISI:000392439200065, title = {A literature review on the full truckload vehicle routing problems}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {This article presents a literature review of the full truckload vehicle routing problem (FTVRP). In this problem, each vehicle can serve only one order at a time; thereafter, the delivery must be completed before the next request. Our review has considered some papers published between 1983 and 2015 and their classification was based on two main areas: First, the issue discussed with encountered business constraints and the obvious applications in industry. Second, the solution methods, both exact and approached (heuristics and metaheuristics). 
Some lines for further works are presented as well.}, isbn = {978-1-4673-8571-8}, author = {Annouch, Anouar and Bouyahyaoui, Karim and Bellabdaoui, Adil}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @conference { ISI:000391354500056, title = {A Mapping Between a BP model and an E-government Portals{\textquoteright} Maturity Model}, booktitle = {2016 INTERNATIONAL CONFERENCE ON ELECTRICAL AND INFORMATION TECHNOLOGIES (ICEIT)}, year = {2016}, note = {2nd International Conference on Electrical and Information Technologies (ICEIT), Tangier, MOROCCO, MAY 04-07, 2016}, pages = {326-331}, abstract = {E-government portals are playing an important role in facilitating the citizens{\textquoteright} life. The e-government services can be executed by citizens without any time or location constraints, which results in great benefits for the citizens. Therefore, agencies should pay a special attention when designing, developing and maintaining those e-government portals. In order to achieve this, agencies need to follow worldwide e-government portals{\textquoteright} best practices that would contribute in the portals{\textquoteright} quality. E-government maturity models can be used for this purpose. The purpose of this paper is to propose an e-government portals{\textquoteright} maturity model that is based on a best practice model for e-government portals.}, isbn = {978-1-4673-8469-8}, author = {Fath-Allah, Labdoullah and Cheikhi, Laila and Al-Qutaish, Rafa E. and Idri, Ali}, editor = {Essaaidi, M and ElHani, S} } @conference {Fath-Allah2016328, title = {A mapping between a BP model and an e-government portals{\textquoteright} maturity model}, booktitle = {Proceedings of 2016 International Conference on Electrical and Information Technologies, ICEIT 2016}, year = {2016}, note = {cited By 0}, pages = {328-333}, abstract = {E-government portals are playing an important role in facilitating the citizens{\textquoteright} life. The e-government services can be executed by citizens without any time or location constraints, which results in great benefits for the citizens. Therefore, agencies should pay a special attention when designing, developing and maintaining those e-government portals. In order to achieve this, agencies need to follow worldwide e-government portals{\textquoteright} best practices that would contribute in the portals{\textquoteright} quality. E-government maturity models can be used for this purpose. The purpose of this paper is to propose an e-government portals{\textquoteright} maturity model that is based on a best practice model for e-government portals. {\textcopyright} 2016 IEEE.}, doi = {10.1109/EITech.2016.7519615}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84992222180\&doi=10.1109\%2fEITech.2016.7519615\&partnerID=40\&md5=cd259a586d4e37b0c6447aa1206e75d4}, author = {Fath-Allah, A.a and Cheikhi, L.a and Al-Qutaish, R.E.b and Idri, A.a} } @conference {Amine2016133, title = {Matching game for green uplink in hyper dense LTE HeTNets}, booktitle = {Proceedings - 2016 International Conference on Wireless Networks and Mobile Communications, WINCOM 2016: Green Communications and Networking}, year = {2016}, note = {cited By 0}, pages = {133-139}, abstract = {In this paper, we aim to improve the energy efficiency of cellular users located in hyper-dense co-channel deployments of LTE small cell Networks (SCNs), randomly distributed within LTE macro cell networks (MCNs). 
Avoiding the severe cross-Tier interferences at the small base stations (SBSs) levels caused by the uplink transmissions between the macro indoor users (which are inside the SBS coverage area) and the macro base station (MBS), ensuring load balancing, and improving energy efficiency are critical technical challenges in hyper-dense co-channel LTE SCNs deployments. As a solution, we formulate our problem as a matching game, then we propose the deferred acceptance algorithm to compute the optimal stable matching consisting of assigning each macro indoor user to the most suitable SBS and vice versa. Simulation results validate our solution, and show how it can effectively improve the energy efficiency of cellular users in hyper-dense LTE HetNets compared to the default Max-SINR association scheme. {\textcopyright} 2016 IEEE.}, doi = {10.1109/WINCOM.2016.7777204}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010465149\&doi=10.1109\%2fWINCOM.2016.7777204\&partnerID=40\&md5=cfdd0315adeb3a468a5382cae7b1ed26}, author = {Amine, M. and Walid, A. and Oualhaj, O.A. and Kobbane, A.} } @conference { ISI:000391420400024, title = {Matching Game for Green Uplink in Hyper Dense LTE HeTNets}, booktitle = {2016 INTERNATIONAL CONFERENCE ON WIRELESS NETWORKS AND MOBILE COMMUNICATIONS (WINCOM)}, year = {2016}, note = {International Conference on Wireless Networks and Mobile Communications (WINCOM), Fez, MOROCCO, OCT 26-29, 2016}, pages = {P133-P139}, publisher = {Univ Sidi Mohamed Ben Abdellah; FST FES; MobiTic; IEEE; IEEE Commun Soc; FSDM; ENSIAS; FST Mohammedia; CLusMed; TICSM Res Pole; Master Tec; CNRST; EBSA Fez; Mobit Assoc}, organization = {Univ Sidi Mohamed Ben Abdellah; FST FES; MobiTic; IEEE; IEEE Commun Soc; FSDM; ENSIAS; FST Mohammedia; CLusMed; TICSM Res Pole; Master Tec; CNRST; EBSA Fez; Mobit Assoc}, abstract = {In this paper, we aim to improve the energy efficiency of cellular users located in hyper-dense co-channel deployments of LTE small cell Networks (SCNs), randomly distributed within LTE macro cell networks (MCNs). Avoiding the severe cross-tier interferences at the small base stations (SBSs) levels caused by the uplink transmissions between the macro indoor users (which are inside the SBS coverage area) and the macro base station (MBS), ensuring load balancing, and improving energy efficiency are critical technical challenges in hyper-dense co-channel LTE SCNs deployments. As a solution, we formulate our problem as a matching game, then we propose the deferred acceptance algorithm to compute the optimal stable matching consisting of assigning each macro indoor user to the most suitable SBS and vice versa. Simulation results validate our solution, and show how it can effectively improve the energy efficiency of cellular users in hyper-dense LTE HetNets compared to the default Max-SINR association scheme.}, isbn = {978-1-5090-3837-4}, author = {Amine, Mariame and Walid, Abdellaziz and Ait Oualhaj, Omar and Kobbane, Abdellatif}, editor = {ElKamili, M and Berrada, I and Badri, A and Ghennioui, H} } @article {Idri2016595, title = {Missing data techniques in analogy-based software development effort estimation}, journal = {Journal of Systems and Software}, volume = {117}, year = {2016}, note = {cited By 0}, pages = {595-611}, abstract = {Missing Data (MD) is a widespread problem that can affect the ability to use data to construct effective software development effort prediction systems. 
This paper investigates the use of missing data (MD) techniques with two analogy-based software development effort estimation techniques: Classical Analogy and Fuzzy Analogy. More specifically, we analyze the predictive performance of these two analogy-based techniques when using toleration, deletion or k-nearest neighbors (KNN) imputation techniques. A total of 1512 experiments were conducted involving seven data sets, three MD techniques (toleration, deletion and KNN imputation), three missingness mechanisms (MCAR: missing completely at random, MAR: missing at random, NIM: non-ignorable missing), and MD percentages from 10 percent to 90 percent. The results suggest that Fuzzy Analogy generates more accurate estimates in terms of the Standardized Accuracy measure (SA) than Classical Analogy regardless of the MD technique, the data set used, the missingness mechanism or the MD percentage. Moreover, this study found that the use of KNN imputation, rather than toleration or deletion, may improve the prediction accuracy of both analogy-based techniques. However, toleration, deletion and KNN imputation are affected by the missingness mechanism and the MD percentage, both of which have a strong negative impact upon effort prediction accuracy. {\textcopyright} 2016 Elsevier Inc. All rights reserved.}, doi = {10.1016/j.jss.2016.04.058}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84966340503\&doi=10.1016\%2fj.jss.2016.04.058\&partnerID=40\&md5=014ecb6ca07ef9fbb58c0cbfb8c1dc3a}, author = {Idri, A.a and Abnane, I.a and Abran, A.b} } @article { ISI:000377231800036, title = {Missing data techniques in analogy-based software development effort estimation}, journal = {JOURNAL OF SYSTEMS AND SOFTWARE}, volume = {117}, year = {2016}, month = {JUL}, pages = {595-611}, abstract = {Missing Data (MD) is a widespread problem that can affect the ability to use data to construct effective software development effort prediction systems. This paper investigates the use of missing data (MD) techniques with two analogy-based software development effort estimation techniques: Classical Analogy and Fuzzy Analogy. More specifically, we analyze the predictive performance of these two analogy-based techniques when using toleration, deletion or k-nearest neighbors (KNN) imputation techniques. A total of 1512 experiments were conducted involving seven data sets, three MD techniques (toleration, deletion and KNN imputation), three missingness mechanisms (MCAR: missing completely at random, MAR: missing at random, NIM: non-ignorable missing), and MD percentages from 10 percent to 90 percent. The results suggest that Fuzzy Analogy generates more accurate estimates in terms of the Standardized Accuracy measure (SA) than Classical Analogy regardless of the MD technique, the data set used, the missingness mechanism or the MD percentage. Moreover, this study found that the use of KNN imputation, rather than toleration or deletion, may improve the prediction accuracy of both analogy-based techniques. However, toleration, deletion and KNN imputation are affected by the missingness mechanism and the MD percentage, both of which have a strong negative impact upon effort prediction accuracy. (C) 2016 Elsevier Inc. 
All rights reserved.}, issn = {0164-1212}, doi = {10.1016/j.jss.2016.04.058}, author = {Idri, Ali and Abnane, Ibtissam and Abran, Alain} } @conference { ISI:000392439200048, title = {Multi period dynamic vehicles routing problem: literature review, modelization and resolution}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {Being a member of the VRP family, the Dynamic VRP (DVRP) has been a topic of interest in the realm of research, especially in the last decade. The resolution approaches range from exact methods and meta-heuristics to customized methods. This paper presents a literature review of the DVRP by classifying relevant studies according to the adopted approach of resolution. The dynamic extension of a classical multi-trip VRP variant already present in the literature is then described. A mathematical model is proposed for this extension. For the resolution of our problem, we adopted an approach based on an ant colony system.}, isbn = {978-1-4673-8571-8}, author = {Ouaddi, Khaoula and Benadada, Youssef and Mhada, Fatima-Zahra}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @article {Jorio2016255, title = {Multi-hop clustering algorithm based on spectral classification for wireless sensor network}, journal = {Lecture Notes in Electrical Engineering}, volume = {381}, year = {2016}, note = {cited By 0}, pages = {255-264}, abstract = {A Wireless Sensor Network (WSN) is composed of a large number of autonomous and compact devices called sensor nodes. This network can be an effective tool for gathering data in a variety of environments. However, these sensor nodes have some constraints due to their limited energy, storage capacity and computing power. Clustering is a kind of a technique which is used to reduce energy consumption and to extend network lifetime. Hence, multi-hop communication is often required when the communication range of the sensor nodes is limited or the number of sensor nodes is very large in a network. In this paper, we propose a multi-hop spectral clustering algorithm to organize the sensor nodes in a WSN into clusters. Simulation results show that the proposed algorithm performs better in reducing the energy consumption of sensors and effectively improves the WSN lifetime. 
{\textcopyright} Springer International Publishing Switzerland 2016.}, doi = {10.1007/978-3-319-30298-0_27}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964091516\&doi=10.1007\%2f978-3-319-30298-0_27\&partnerID=40\&md5=9978fe8f8eb166334a20f4b43e48bf35}, author = {Jorio, A.a and Fkihi, S.E.b and Elbhiri, B.c and Aboutajdine, D.a} } @conference { ISI:000385280000027, title = {Multi-hop Clustering Algorithm Based on Spectral Classification for Wireless Sensor Network}, booktitle = {PROCEEDINGS OF THE MEDITERRANEAN CONFERENCE ON INFORMATION \& COMMUNICATION TECHNOLOGIES 2015 (MEDCT 2015), VOL 2}, series = {Lecture Notes in Electrical Engineering}, volume = {381}, year = {2016}, note = {Mediterranean Conference on Information and Communication Technologies (MedCT), Saidia, MOROCCO, MAY 07-09, 2015}, pages = {255-264}, abstract = {A Wireless Sensor Network (WSN) is composed of a large number of autonomous and compact devices called sensor nodes. This network can be an effective tool for gathering data in a variety of environments. However, these sensor nodes have some constraints due to their limited energy, storage capacity and computing power. Clustering is a kind of a technique which is used to reduce energy consumption and to extend network lifetime. Hence, multi-hop communication is often required when the communication range of the sensor nodes is limited or the number of sensor nodes is very large in a network. In this paper, we propose a multi-hop spectral clustering algorithm to organize the sensor nodes in a WSN into clusters. Simulation results show that the proposed algorithm performs better in reducing the energy consumption of sensors and effectively improves the WSN lifetime.}, isbn = {978-3-319-30298-0; 978-3-319-30296-6}, issn = {1876-1100}, doi = {10.1007/978-3-319-30298-0\_27}, author = {Jorio, Ali and El Fkihi, Sanaa and Elbhiri, Brahim and Aboutajdine, Driss}, editor = {ElOualkadi, A and Choubani, F and ElMoussati, A} } @conference { ISI:000389502600004, title = {Nearest Neighbors Graph Construction: Peer Sampling to the Rescue}, booktitle = {Networked Systems, NETYS 2016}, series = {Lecture Notes in Computer Science}, volume = {9944}, year = {2016}, note = {4th International Conference on Networked Systems (NETYS), Marrakech, MOROCCO, MAY 18-20, 2016}, pages = {48-62}, abstract = {In this paper, we propose an efficient KNN service, called KPS (KNN-Peer-Sampling). The KPS service can be used in various contexts e.g. recommendation systems, information retrieval and data mining. KPS borrows concepts from P2P gossip-based clustering protocols to provide a localized and efficient KNN computation in large-scale systems. KPS is a sampling-based iterative approach, combining randomness, to provide serendipity and avoid local minimum, and clustering, to ensure fast convergence. We compare KPS against the state of the art KNN centralized computation algorithm NNDescent, on multiple datasets. The experiments confirm the efficiency of KPS over NNDescent: KPS improves significantly on the computational cost while converging quickly to a close to optimal KNN graph. For instance, the cost, expressed in number of pairwise similarity computations, is reduced by approximate to 23\% and approximate to 49\% to construct high quality KNN graphs for Jester and MovieLens datasets, respectively. 
In addition, the randomized nature of KPS ensures eventual convergence, not always achieved with NNDescent.}, isbn = {978-3-319-46140-3; 978-3-319-46139-7}, issn = {0302-9743}, doi = {10.1007/978-3-319-46140-3\_4}, author = {Benkaouz, Yahya and Erradi, Mohammed and Kermarrec, Anne-Marie}, editor = {Abdulla, PA and DelporteGallet, C} } @conference { ISI:000392439200021, title = {New approaches for solving the container stacking problem}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {Container shipment has grown very fast during the last ten years; in Tanger Med port, for example, 3 million TEUs (twenty-foot equivalent units) of containers (80\% in transshipment) were handled during 2015 (Tanger Med Port authority {\textquoteleft}{\textquoteleft}TMPA{{\textquoteright}{\textquoteright}} study, March 2016 {[} 1]), which corresponds to an increase of 40\% compared with the last study made in 2012. Thus, our study will deal with the port management and the improvement of the operations processes. The aim of this paper is to define new strategies to solve the container stacking problem (CSP) using an optimization approach. Thus we define a new MIP (Mathematical Integer Program) to deal with the operational tasks in a container terminal, in which we optimize the number of stacks used to store a given number of inbound containers and also minimize the related cost of the traveling distance for inbound containers between the sea side and the yard side. This paper is organized as follows: we first introduce and situate our problem, then we present the literature review of the CSP. The problem definition and the MIP introduction will be the subject of the next section, and we finish by presenting the findings and the future perspectives. As a proposed resolution approach for our MIP, we propose a developed genetic algorithm strategy (DGAS) as a metaheuristic and the Branch \& Cut (B\&C) as an exact method. Our main objective is to avoid reshuffles and to find out the best yard configuration to store inbound containers. The DGAS will be applied to existing instances in the literature, and the obtained numerical results are compared with the Cplex results (B\&C). The main inputs for our proposed framework are the height, weight, destination and type of containers \& yard bays, and the expected departure time (EDT). 
Our final objective is to provide planners with an optimized guide to easily define the unloading plan and the storage position for each container, given an initial stacking state and a container demand.}, isbn = {978-1-4673-8571-8}, author = {Razouk, Chafik and Benadada, Youssef and Boukachour, Jaouad}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @conference { ISI:000392439200017, title = {A new crossover to solve the full truckload vehicle routing problem using genetic algorithm}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {This paper considers the full-truckload selective multi-depot vehicle routing problem under time window constraints (denoted by FT-SMDVRPTW), which is a generalization of the vehicle routing problem (VRP). Our objective function is to maximize the total profit that the vehicle generates during its trip. In this study, we{\textquoteright}ll present a review of the literature about full truckload vehicle routing; we{\textquoteright}ll define the FT-SMDVRPTW, which will be solved using a genetic algorithm. A new complex two-part chromosome is used to represent the solution to our problem. New individuals are generated through a selection based on the elitist and roulette methods, an improved crossover operator called selected two-part chromosome crossover (STCX), and a swap mutation operator. Finally, we give a numerical example on a randomly generated instance to illustrate our approach.}, isbn = {978-1-4673-8571-8}, author = {El Bouyahyiouy, Karim and Bellabdaoui, Adil}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @article {Nouh2016742, title = {New efficient scheme based on reduction of the dimension in the multiple impulse method to find the minimum distance of linear codes}, journal = {International Review on Computers and Software}, volume = {11}, number = {9}, year = {2016}, note = {cited By 0}, pages = {742-751}, abstract = {In order to find a minimum weight codeword in a linear code, the Multiple Impulse Method uses the Ordered Statistics Decoder of order 3 having a complexity which increases with the code dimension. This paper presents an important improvement of this method by finding a sub code of C of small dimension containing a lowest weight codeword. In the case of Binary Extended Quadratic Residue codes, the proposed technique consists of finding a self-invertible permutation σ from the projective special linear group and searching for a codeword having the minimum weight in the sub code fixed by σ. The proposed technique gives the exact value of the minimum distance for all binary quadratic residue codes of length less than 223 by using the Multiple Impulse Method on the sub codes in less than one second. For lengths more than 223, the obtained results prove the high capacity of the proposed technique to find the lowest weight in less time. The proposed idea is generalized for BCH codes and it has permitted finding the true value of the minimum distance for some codes of lengths 1023 and 2047. 
The proposed methods performed very well in comparison to previously known results. {\textcopyright} 2016 Praise Worthy Prize S.r.l. - All rights reserved.}, doi = {10.15866/irecos.v11i9.9702}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85006717110\&doi=10.15866\%2firecos.v11i9.9702\&partnerID=40\&md5=0cb2c7691fda83c0ef9764a93095e64c}, author = {Nouh, S.a and Joundan, I.A.a and Aylaj, B.b and Belkasmi, M.c and Namir, A.a} } @article {Iazzi2016156, title = {A new method for fall detection of elderly based on human shape and motion variation}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {10073 LNCS}, year = {2016}, note = {cited By 0}, pages = {156-167}, abstract = {Fall detection for elderly and patient has been an active research topic due to the great demand for products and technology of fall detection in the healthcare industry. Computer vision provides a promising solution to analyze personal behavior and detect certain unusual events such as falls. In this paper, we present a new method for fall detection based on the variation of shape and motion. First, we use the CodeBook method to extract the person silhouette from the video. Then, information of rectangle, ellipse and histogram projection are used to provide features to analyze the person shape. In addition, we represent the person shape by three blocks extracted from rectangle. Then, we use optical flow to analyze the person motion within each blocks. Finally, falls are detected from normal activities using thresholding-based method. All experiments show that our fall detection system achieves very good performances in accuracy and error rate. {\textcopyright} Springer International Publishing AG 2016.}, doi = {10.1007/978-3-319-50832-0_16}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85007364688\&doi=10.1007\%2f978-3-319-50832-0_16\&partnerID=40\&md5=4ed27ef63754e531149224bda59c8227}, author = {Iazzi, A.a and Rziza, M.a and Thami, R.O.H.a b and Aboutajdine, D.a} } @conference {Anter2016, title = {Nk-schemas: A novel algorithm for creating the view{\textquoteright} schemas to materialize in hybrid mediator}, booktitle = {Proceedings of IEEE/ACS International Conference on Computer Systems and Applications, AICCSA}, volume = {2016-July}, year = {2016}, note = {cited By 0}, abstract = {The explosion of information and telecommunications technologies, has made easy the access and production of information. Thus, a very large mass of the latter has generated. This situation has made the integration systems an immediate necessity. Among these systems, there is the hybrid mediator. The latter interrogates one part of data on demand as in the virtual approach, while charging, filtering and storing the second part, as views, in a local database. The creation of this second part is a critical task. We propose in this paper, a new algorithm for creating views{\textquoteright} schemas to materialize in the hybrid integration system. {\textcopyright} 2015 IEEE.}, doi = {10.1109/AICCSA.2015.7507200}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84980416133\&doi=10.1109\%2fAICCSA.2015.7507200\&partnerID=40\&md5=1412e0fe0867141a2f0ec5a2e0034ebb}, author = {Anter, S. and Zellou, A. 
and Idri, A.} } @conference {Essadi2016, title = {Operator-based coordination between heterogeneous DSLs: Case of telecommunication network supervision}, booktitle = {2016 International Conference on Information Technology for Organizations Development, IT4OD 2016}, year = {2016}, note = {cited By 0}, abstract = {Coordination between heterogeneous DSLs is needed more and more to overcome complexity of modern systems involving many business domains. This article gives a definition of heterogeneity, coordination and a classification of possible relationships between DSLs. Then, propose coordination operators to resolve heterogeneity accordingly with precedent works. As illustrative example, the paper presents a telecommunication network supervision system where two different DSLs: DSL ANS.1 and DSL supervision need to be coordinated, an operator of structural mapping has been used to coordinate between the two DSLs. {\textcopyright} 2016 IEEE.}, doi = {10.1109/IT4OD.2016.7479252}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978115095\&doi=10.1109\%2fIT4OD.2016.7479252\&partnerID=40\&md5=2eb0dfb1e58d27a27c6a238f6a5dc265}, author = {Essadi, N.a and Anwar, A.a and Mahmoud, N.b} } @conference { ISI:000386352000004, title = {Operator-Based Coordination between Heterogeneous DSLs : Case of Telecommunication Network Supervision}, booktitle = {2016 INTERNATIONAL CONFERENCE ON INFORMATION TECHNOLOGY FOR ORGANIZATIONS DEVELOPMENT (IT4OD)}, year = {2016}, note = {International Conference on Information Technology for Organizations Development (IT4OD), USMBA Univ, ENSA Fez, ERSI Lab, Fez, MOROCCO, MAR 30-APR 01, 2016}, publisher = {Natl Sch Appl Sci Fez; Univ Sidi Mohamed Ben Abdellah; FST Fez; Fac Sci Fez; Lab Renewable Energies \& Intelligent Syst; Assoc ANRITE; CNRST; IEEE Morocco Sect; IEEE Advancing Technol Human}, organization = {Natl Sch Appl Sci Fez; Univ Sidi Mohamed Ben Abdellah; FST Fez; Fac Sci Fez; Lab Renewable Energies \& Intelligent Syst; Assoc ANRITE; CNRST; IEEE Morocco Sect; IEEE Advancing Technol Human}, abstract = {Coordination between heterogeneous DSLs is needed more and more to overcome complexity of modern systems involving many business domains. This article gives a definition of heterogeneity, coordination and a classification of possible relationships between DSLs. Then, propose coordination operators to resolve heterogeneity accordingly with precedent works. As illustrative example, the paper presents a telecommunication network supervision system where two different DSLs: DSL ANS.1 and DSL supervision need to be coordinated, an operator of structural mapping has been used to coordinate between the two DSLs.}, isbn = {978-1-4673-7689-1}, author = {Essadi, Naima and Anwar, Adil and Mahmoud, Nassar} } @conference {Sarhani2016, title = {Particle swarm optimization with a mutation operator for solving the preventive aircraft maintenance routing problem}, booktitle = {Proceedings of the 3rd IEEE International Conference on Logistics Operations Management, GOL 2016}, year = {2016}, note = {cited By 0}, abstract = {Aircraft Maintenance Routing (AMR) is one of the major optimization problems in the airline industry. In this study, we present a mathematical formulation for the daily AMR problem which aims to minimize the risk of both scheduled and non-scheduled maintenance costs. Exact methods may fail to deal with such problems. 
Our contribution is then to examine the use of an improved particle swarm optimization (PSO) algorithm by a uniform mutation operator for solving this probabilistic problem. Computational results show that our hybrid approach gives competitive results comparing to the native binary PSO. {\textcopyright} 2016 IEEE.}, doi = {10.1109/GOL.2016.7731683}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85001976721\&doi=10.1109\%2fGOL.2016.7731683\&partnerID=40\&md5=9bef08dc37eff9c6ed7b1c2c6e2e41b6}, author = {Sarhani, M. and Ezzinbi, O. and Afia, A.E. and Benadada, Y.} } @conference { ISI:000392439200025, title = {Particle swarm optimization with a mutation operator for solving the preventive aircraft maintenance routing problem}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {Aircraft Maintenance Routing (AMR) is one of the major optimization problems in the airline industry. In this study, we present a mathematical formulation for the daily AMR problem which aims to minimize the risk of both scheduled and non-scheduled maintenance costs. Exact methods may fail to deal with such problems. Our contribution is then to examine the use of an improved particle swarm optimization (PSO) algorithm by a uniform mutation operator for solving this probabilistic problem. Computational results show that our hybrid approach gives competitive results comparing to the native binary PSO.}, isbn = {978-1-4673-8571-8}, author = {Sarhani, Malek and Ezzinbi, Omar and El Afia, Abdellatif and Benadada, Youssef}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @conference { ISI:000392439200033, title = {Proposal of a modeling approach and a set of KPI to the drug supply chain within the hospital}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {The hospital is a complex system where many actors are involved; The challenges that this environment is facing justify the focus on this research subject. 
The contribution of the paper includes the proposition of a four-level model that describes the hospital drug supply chain, based on the combination of the Supply Chain Operation Reference (SCOR) and Business Process Modelling Notation (BPMN), and the identification of a set of key performance indicators (KPI) that assess how effective and efficient the drug management processes within the hospital are and how to improve them.}, isbn = {978-1-4673-8571-8}, author = {Mezouar, Houda and El Afia, Abdellatif and Chiheb, Radouane and Ouzayd, Fatima}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @conference {Haddad2016237, title = {Rectangular Dielectric Resonator Antenna (RDRA) for anti-collision short range radar (SRR) application}, booktitle = {Proceedings of 2016 International Conference on Electrical and Information Technologies, ICEIT 2016}, year = {2016}, note = {cited By 0}, pages = {237-239}, abstract = {This paper presents the design of a Rectangular Dielectric Resonator Antenna (RDRA) fed by a micro-strip line for anti-collision radar SRR application at 24 GHz. The proposed RDRA operates at a frequency of 24 GHz with a high dielectric constant of 41. The simulated RDRA has a high radiation efficiency (88.4 \%). The return loss, radiation pattern and gain of the proposed antenna are evaluated. The simulation process was carried out using Computer Simulation Technology (CST) Microwave Studio. {\textcopyright} 2016 IEEE.}, doi = {10.1109/EITech.2016.7519597}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84992188168\&doi=10.1109\%2fEITech.2016.7519597\&partnerID=40\&md5=459bdb70786bf969bb54ead0761fd18e}, author = {Haddad, A.a and Aoutoul, M.b and Rais, K.a and Essaaidi, M.c} } @conference { ISI:000391354500040, title = {Rectangular Dielectric Resonator Antenna (RDRA) for Anti-collision Short Range Radar (SRR) Application}, booktitle = {2016 INTERNATIONAL CONFERENCE ON ELECTRICAL AND INFORMATION TECHNOLOGIES (ICEIT)}, year = {2016}, note = {2nd International Conference on Electrical and Information Technologies (ICEIT), Tangier, MOROCCO, MAY 04-07, 2016}, pages = {237-239}, abstract = {This paper presents the design of a Rectangular Dielectric Resonator Antenna (RDRA) fed by a micro-strip line for anti-collision radar SRR application at 24 GHz. The proposed RDRA operates at a frequency of 24 GHz with a high dielectric constant of 41. The simulated RDRA has a high radiation efficiency (88.4 \%). The return loss, radiation pattern and gain of the proposed antenna are evaluated. The simulation process was carried out using Computer Simulation Technology (CST) Microwave Studio.}, isbn = {978-1-4673-8469-8}, author = {Haddad, Abderrahim and Aoutoul, Mohssin and Rais, Khalid and Essaaidi, Mohamed}, editor = {Essaaidi, M and ElHani, S} } @article {Anter20162128, title = {Retrieving and materializing data in hybrid mediators}, journal = {International Journal of Applied Engineering Research}, volume = {11}, number = {3}, year = {2016}, note = {cited By 0}, pages = {2128-2134}, abstract = {With the emergence of the new generation of information technologies and telecommunications, the mass of information produced by individuals and enterprises has increased considerably. Thus, and in order to manage this diversity of information, integration systems were proposed. Among these, we find the hybrid information integration systems. They allow materializing a part of the data in a local database, while integrating virtually the other part. 
As the materialized part is organized as views, it becomes necessary to propose algorithms for this objective. Among the most interesting ones is k-schema, which organizes the attributes into a set of views while assigning each attribute to a single view. This choice leads to not loading some data that are highly requested by users while, at the same time, loading other data that are rarely requested. In this paper, we propose a new algorithm to this end, in which the same attribute may be assigned to several views. We also propose new functions for calculating the dependencies between attributes. {\textcopyright} Research India Publications.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84973470278\&partnerID=40\&md5=140f0f35ca53260f11d97b5958f6ee8c}, author = {Anter, S. and Zellou, A. and Idri, A.} } @article {Amraoui2016, title = {Security-Based Mechanism for Proactive Routing Schema Using Game Theory Model}, journal = {Mobile Information Systems}, volume = {2016}, year = {2016}, note = {cited By 0}, abstract = {Game theory may offer a useful mechanism to address many problems in mobile ad hoc networks (MANETs). One of the key concepts in the research field of such networks with Optimized Link State Routing Protocol (OLSR) is the security problem. Relying on applying game theory to study this problem, we consider two strategies during this suggested model: cooperate and not-cooperate. However, in such networks, it is not easy to identify different actions of players. In this paper, we have essentially been inspired by recent advances in game theory to propose a new model for security in MANETs. Our proposal presents a powerful tool with a large number of players where interactions are played multiple times. Moreover, each node keeps a cooperation rate (CR) record of other nodes to cope with the behaviors and mitigate the aggregate effect of other malicious devices. Additionally, our suggested security mechanism takes into account not only security requirements but also system resources and network performance. The simulation results using Network Simulator 3 are presented to illustrate the effectiveness of the proposal. {\textcopyright} 2016 Hicham Amraoui et al.}, doi = {10.1155/2016/5653010}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85008883716\&doi=10.1155\%2f2016\%2f5653010\&partnerID=40\&md5=5aaffd8b2d52f8958a3f2543444109fe}, author = {Amraoui, H.a and Habbani, A.a b and Hajami, A.c and Bilal, E.d} } @article { ISI:000390576800001, title = {Security-Based Mechanism for Proactive Routing Schema Using Game Theory Model}, journal = {MOBILE INFORMATION SYSTEMS}, year = {2016}, abstract = {Game theory may offer a useful mechanism to address many problems in mobile ad hoc networks (MANETs). One of the key concepts in the research field of such networks with Optimized Link State Routing Protocol (OLSR) is the security problem. Relying on applying game theory to study this problem, we consider two strategies during this suggested model: cooperate and not-cooperate. However, in such networks, it is not easy to identify different actions of players. In this paper, we have essentially been inspired by recent advances in game theory to propose a new model for security in MANETs. Our proposal presents a powerful tool with a large number of players where interactions are played multiple times. 
Moreover, each node keeps a cooperation rate (CR) record of other nodes to cope with the behaviors and mitigate the aggregate effect of other malicious devices. Additionally, our suggested security mechanism takes into account not only security requirements but also system resources and network performance. The simulation results using Network Simulator 3 are presented to illustrate the effectiveness of the proposal.}, issn = {1574-017X}, doi = {10.1155/2016/5653010}, author = {Amraoui, Hicham and Habbani, Ahmed and Hajami, Abdelmajid and Bilal, Essaid} } @article {Ayoub20162179, title = {Serially concatenated OSMLD codes: Design and iterative decoding}, journal = {Applied Mathematical Sciences}, volume = {10}, number = {41-44}, year = {2016}, note = {cited By 0}, pages = {2179-2188}, abstract = {In this paper, the performance of serially concatenated one-step majority logic decodable (SC-OSMLD) codes is investigated. The iterative decoding process uses a soft-input soft-output threshold decoding algorithm [1] as component decoder with our proposed connection scheme [2]. The effect of various component codes, interleaver sizes (number of sub-blocks), and the number of iterations are investigated. Simulation results for SC-OSMLD codes transmitted over the Additive White Gaussian Noise (AWGN) channel are provided. The simulation results show that the slope of the curves and the coding gain are improved by increasing the number of decoder iterations and/or the interleaver size. {\textcopyright} 2016 Fouad Ayoub et al.}, doi = {10.12988/ams.2016.6387}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84983503792\&doi=10.12988\%2fams.2016.6387\&partnerID=40\&md5=46efa596ad7d8a871fc411883576d89c}, author = {Ayoub, F.a and Farchane, A.b and Mohamed, A.c and Belkasmi, M.c and Himmi, M.M.d} } @conference { ISI:000376431400019, title = {Simulated Annealing Decoding of Linear Block Codes}, booktitle = {PROCEEDINGS OF THE MEDITERRANEAN CONFERENCE ON INFORMATION \& COMMUNICATION TECHNOLOGIES 2015, VOL 1}, series = {Lecture Notes in Electrical Engineering}, volume = {380}, year = {2016}, note = {Mediterranean Conference on Information and Communication Technologies (MedCT), MOROCCO, MAY 07-09, 2015}, pages = {175-183}, abstract = {In this paper, we present a hard-decision decoding algorithm using the Simulated Annealing (SA) technique. The main idea is to find the optimal solution of the transmitted codeword by a process hopping between two different tasks. The simulations, applied on some binary linear block codes over the AWGN channel, show that the Simulated Annealing decoder has the same performance as the Berlekamp-Massey Algorithm (BM). Furthermore, the SA decoder is more efficient compared to other decoders based on genetic algorithms in terms of performance and run time.}, isbn = {978-3-319-30301-7; 978-3-319-30299-7}, issn = {1876-1100}, doi = {10.1007/978-3-319-30301-7\_19}, author = {Aylaj, Bouchaib and Belkasmi, Mostafa}, editor = {ElOualkadi, A and Choubani, F and ElMoussati, A} } @article {Aylaj2016175, title = {Simulated annealing decoding of linear block codes}, journal = {Lecture Notes in Electrical Engineering}, volume = {380}, year = {2016}, note = {cited By 0}, pages = {175-183}, abstract = {In this paper, we present a hard-decision decoding algorithm using the Simulated Annealing (SA) technique. The main idea is to find the optimal solution of the transmitted codeword by a process hopping between two different tasks. 
The simulations, applied on some binary linear block codes over the AWGN channel, show that the Simulated Annealing decoder has the same performance as the Berlekamp-Massey Algorithm (BM). Furthermore SA Decoder is more efficient compared to other Decoder based on Genetic algorithms in terms of performance and run time. {\textcopyright} Springer International Publishing Switzerland 2016.}, doi = {10.1007/978-3-319-30301-7_19}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964495058\&doi=10.1007\%2f978-3-319-30301-7_19\&partnerID=40\&md5=d6baed6c15bb3a2d4e616c03caff9e9f}, author = {Aylaj, B.a and Belkasmi, M.b} } @conference { ISI:000389502600045, title = {SNA: Detecting Influencers over Social Networks}, booktitle = {Networked Systems, NETYS 2016}, series = {Lecture Notes in Computer Science}, volume = {9944}, year = {2016}, note = {4th International Conference on Networked Systems (NETYS), Marrakech, MOROCCO, MAY 18-20, 2016}, pages = {388}, isbn = {978-3-319-46140-3; 978-3-319-46139-7}, issn = {0302-9743}, author = {Aghmadi, Ali and Erradi, Mohammed and Kobbane, Abdellatif}, editor = {Abdulla, PA and DelporteGallet, C} } @conference {Annouch2016, title = {Split delivery and pickup vehicle routing problem with two-dimensional loading constraints}, booktitle = {SITA 2016 - 11th International Conference on Intelligent Systems: Theories and Applications}, year = {2016}, note = {cited By 0}, abstract = {In this paper, we address the distribution problem of liquefied petroleum gas (LPG). In particular we are targeting the optimization of pickup and split delivery of gas bottles to a set of customers with loading constraints. The problem combines the loading of a heterogeneous fleet of vehicles through a set of different size racks and then the establishment of divisible deliveries (split delivery: SDVRP). The problem discussed in this paper can be presented as a Multi-depot Two-Loading Split Delivery and Pickup using Heterogeneous fleet of Vehicles with Time Windows vehicle Routing Problem and with Multi-Product (2L-SDP-HVRPTW-MP). After proposing a mathematical formulation for this problem (MILP), we tested our model firstly by a small instances using ILOG CPLEX solver following this by testing and discussing numerical results. {\textcopyright} 2016 IEEE.}, doi = {10.1109/SITA.2016.7772277}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010366293\&doi=10.1109\%2fSITA.2016.7772277\&partnerID=40\&md5=dcf8e30d18c805be9f705a1c130183cf}, author = {Annouch, A. and Bellabdaoui, A. and Minkhar, J.} } @conference { ISI:000391420300022, title = {Split delivery and pickup vehicle routing problem with two-dimensional loading constraints}, booktitle = {2016 11TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, series = {International Conference on Intelligent Systems, Theories and Applications}, year = {2016}, note = {11th International Conference on Intelligent Systems - Theories and Applications (SITA), Mohammedia, MOROCCO, OCT 19-20, 2016}, publisher = {IEEE; Univ Hassan Casablanca; IEEE Morocco Sect; Fac Sci Tech Mohammedia}, organization = {IEEE; Univ Hassan Casablanca; IEEE Morocco Sect; Fac Sci Tech Mohammedia}, abstract = {In this paper, we address the distribution problem of liquefied petroleum gas (LPG). In particular we are targeting the optimization of pickup and split delivery of gas bottles to a set of customers with loading constraints. 
The problem combines the loading of a heterogeneous fleet of vehicles through a set of different size racks and then the establishment of divisible deliveries (split delivery: SDVRP). The problem discussed in this paper can be presented as a Multi-depot Two-Loading Split Delivery and Pickup using Heterogeneous fleet of Vehicles with Time Windows vehicle Routing Problem and with Multi-Product (2L-SDP-HVRPTW-MP). After proposing a mathematical formulation for this problem (MILP), we first tested our model on small instances using the ILOG CPLEX solver, then discussed the numerical results.}, isbn = {978-1-5090-5781-8}, issn = {2378-2528}, author = {Annouch, Anouar and Bellabdaoui, Adil and Minkhar, Jawad} } @conference { ISI:000392439200008, title = {Strategic planning problem represented by a three-echelon logistics network-modeling and solving}, booktitle = {PROCEEDINGS OF THE 3RD IEEE INTERNATIONAL CONFERENCE ON LOGISTICS OPERATIONS MANAGEMENT (GOL{\textquoteright}16)}, year = {2016}, note = {3rd IEEE International Conference on Logistics Operations Management (GOL), Fes, MOROCCO, MAY 23-25, 2016}, publisher = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, organization = {Sidi Mohammed Ben Abdellah Univ Fes, Fac Sci \& Technol; Mohammed V Univ Rabat, ENSIAS Sch; Univ Havre; IEEE}, abstract = {This article aims to elaborate a strategic plan allowing decision makers to take the right decisions (selecting suppliers, selecting plants that can produce a specific product, ..) at the right moment in order to minimize the generated costs. Our work then consists in optimizing a multi-scale, multi-period location-distribution problem. The problem belongs to the FLNP family and is NP-hard. The objective of our MIP problem is to maximize the income of a production company via the minimization of costs: the cost of supplying, the cost of producing and the cost of transportation. Several aspects are treated in this work: the planning horizon (multi-period) and the network structure (multi-echelon). Given the limits of exact methods, we have proposed to solve this problem with a heuristic method; the choice that seems most adequate for our problem is LNS (Large Neighborhood Search). In this perspective, we have reformulated our model {[}12] so that it is represented as a path-based logistic network before applying LNS.}, isbn = {978-1-4673-8571-8}, author = {Hamada, Yahya and Benadada, Youssef and Gendron, Bernard}, editor = {Alaoui, AE and Benadada, Y and Boukachour, J} } @article {Idri2016151, title = {Systematic literature review of ensemble effort estimation}, journal = {Journal of Systems and Software}, volume = {118}, year = {2016}, note = {cited By 1}, pages = {151-175}, abstract = {The need to overcome the weaknesses of single estimation techniques for prediction tasks has given rise to ensemble methods in software development effort estimation (SDEE). An ensemble effort estimation (EEE) technique combines several of the single/classical models found in the SDEE literature. However, to the best of our knowledge, no systematic review has yet been performed with a focus on the use of EEE techniques in SDEE.
The purpose of this review is to analyze EEE techniques from six viewpoints: single models used to construct ensembles, ensemble estimation accuracy, rules used to combine single estimates, accuracy comparison of EEE techniques with single models, accuracy comparison between EEE techniques and methodologies used to construct ensemble methods. We performed a systematic review of EEE studies published between 2000 and 2016, and we selected 24 of them to address the questions raised in this review. We found that EEE techniques may be separated into two types: homogeneous and heterogeneous, and that the machine learning single models are the most frequently employed in constructing EEE techniques. We also found that EEE techniques usually yield acceptable estimation accuracy, and in fact are more accurate than single models. {\textcopyright} 2016 Elsevier Inc. All rights reserved.}, doi = {10.1016/j.jss.2016.05.016}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84969531836\&doi=10.1016\%2fj.jss.2016.05.016\&partnerID=40\&md5=4217c2107911348615a692540959501e}, author = {Idri, A.a and Hosni, M.a and Abran, A.b} } @article { ISI:000378664500010, title = {Systematic literature review of ensemble effort estimation}, journal = {JOURNAL OF SYSTEMS AND SOFTWARE}, volume = {118}, year = {2016}, month = {AUG}, pages = {151-175}, abstract = {The need to overcome the weaknesses of single estimation techniques for prediction tasks has given rise to ensemble methods in software development effort estimation (SDEE). An ensemble effort estimation (EEE) technique combines several of the single/classical models found in the SDEE literature. However, to the best of our knowledge, no systematic review has yet been performed with a focus on the use of EEE techniques in SDEE. The purpose of this review is to analyze EEE techniques from six viewpoints: single models used to construct ensembles, ensemble estimation accuracy, rules used to combine single estimates, accuracy comparison of EEE techniques with single models, accuracy comparison between EEE techniques and methodologies used to construct ensemble methods. We performed a systematic review of EEE studies published between 2000 and 2016, and we selected 24 of them to address the questions raised in this review. We found that EEE techniques may be separated into two types: homogeneous and heterogeneous, and that the machine learning single models are the most frequently employed in constructing EEE techniques. We also found that EEE techniques usually yield acceptable estimation accuracy, and in fact are more accurate than single models. (C) 2016 Elsevier Inc. All rights reserved.}, issn = {0164-1212}, doi = {10.1016/j.jss.2016.05.016}, author = {Idri, Ali and Hosni, Mohamed and Abran, Alain} } @conference { ISI:000386649000023, title = {Systematic Mapping Study of Dealing with Error in Software Development Effort Estimation}, booktitle = {2016 42ND EUROMICRO CONFERENCE ON SOFTWARE ENGINEERING AND ADVANCED APPLICATIONS (SEAA)}, year = {2016}, note = {42nd Euromicro Conference Series on Software Engineering and Advanced Applications (SEAA), Limassol, CYPRUS, AUG 31-SEP 02, 2016}, pages = {140-147}, publisher = {Univ Cyprus; Technolog Educ Inst Western Greece}, organization = {Univ Cyprus; Technolog Educ Inst Western Greece}, abstract = {Over the last decades, the software engineering community has investigated new techniques for software development effort estimation. Unfortunately, the estimates were not always accurate. 
Error approaches are therefore an interesting track for improving the running performance of projects and their financial profitability. The aim of this systematic mapping study is to summarize and synthesize the existing studies dealing with effort estimation error and uncertainty and to classify them based on research approaches, contribution types, accuracy criteria, datasets, error approaches and effort estimation techniques used. In total, 19 papers published between 1990 and 2015 were selected. We observed a balance between the managerial approaches and the technical ones. Furthermore, the proposed error techniques and frameworks generally improve the accuracy of effort estimation techniques. Fuzzy logic, bootstrapping and risk analysis are promising avenues that could be combined with various estimation techniques.}, isbn = {978-1-5090-2819-1}, doi = {10.1109/SEAA.2016.39}, author = {El Koutbi, Salma and Idri, Ali and Abran, Alain} } @conference {Idri2016132, title = {Systematic mapping study of ensemble effort estimation}, booktitle = {ENASE 2016 - Proceedings of the 11th International Conference on Evaluation of Novel Software Approaches to Software Engineering}, year = {2016}, note = {cited By 0}, pages = {132-139}, abstract = {Ensemble methods have been used recently for prediction in the data mining area in order to overcome the weaknesses of single estimation techniques. This approach consists of combining more than one single technique to predict a dependent variable and has attracted the attention of the software development effort estimation (SDEE) community. An ensemble effort estimation (EEE) technique combines several existing single/classical models. In this study, a systematic mapping study was carried out to identify the papers based on EEE techniques published in the period 2000-2015 and classify them according to five classification criteria: research type, research approach, EEE type, single models used to construct EEE techniques, and rule used to combine single estimates into an EEE technique. Publication channels and trends were also identified. Within the 16 studies selected, homogeneous EEE techniques were the most investigated. Furthermore, the machine learning single models were the most frequently employed to construct EEE techniques and two types of combiner (linear and non-linear) have been used to get the prediction value of an ensemble. Copyright {\textcopyright} 2016 by SCITEPRESS - Science and Technology Publications, Lda. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84979496940\&partnerID=40\&md5=8108d3f05a4d4c689c6237a0019141b7}, author = {Idri, A.a and Hosni, M.a and Abran, A.b} } @conference { ISI:000391107900013, title = {Systematic Mapping Study of Ensemble Effort Estimation}, booktitle = {ENASE: PROCEEDINGS OF THE 11TH INTERNATIONAL CONFERENCE ON EVALUATION OF NOVEL SOFTWARE APPROACHES TO SOFTWARE ENGINEERING}, year = {2016}, note = {11th International Conference on Evaluation of Novel Software Approaches to Software Engineering, Rome, ITALY, APR 27-28, 2016}, pages = {132-139}, abstract = {Ensemble methods have been used recently for prediction in the data mining area in order to overcome the weaknesses of single estimation techniques. This approach consists of combining more than one single technique to predict a dependent variable and has attracted the attention of the software development effort estimation (SDEE) community.
An ensemble effort estimation (EEE) technique combines several existing single/classical models. In this study, a systematic mapping study was carried out to identify the papers based on EEE techniques published in the period 2000-2015 and classify them according to five classification criteria: research type, research approach, EEE type, single models used to construct EEE techniques, and rule used to combine single estimates into an EEE technique. Publication channels and trends were also identified. Within the 16 studies selected, homogeneous EEE techniques were the most investigated. Furthermore, the machine learning single models were the most frequently employed to construct EEE techniques and two types of combiner (linear and non-linear) have been used to get the prediction value of an ensemble.}, isbn = {978-989-758-189-2}, doi = {10.5220/0005822701320139}, author = {Idri, Ali and Hosni, Mohamed and Abran, Alain}, editor = {Maciaszek, L and Filipe, J} } @conference { ISI:000381755100005, title = {Theoretical Analysis of BER Performance for Asynchronous FBMC Based Multi-cellular Networks with Non Linear Distortions}, booktitle = {ADVANCES IN UBIQUITOUS NETWORKING}, series = {Lecture Notes in Electrical Engineering}, volume = {366}, year = {2016}, note = {International Symposium on Ubiquitous Networking (UNet), Casablanca, MOROCCO, SEP 08-10, 2015}, pages = {53-61}, abstract = {In this paper, we present a theoretical analysis of bit error rate (BER) for asynchronous filter bank multicarrier (FBMC) based multi-cellular networks in the presence of high power amplifier (HPA) nonlinear distortion (NLD). A promising class of FBMC modulation called Cosine Modulated Multitone (CMT) is considered and the analytical BER is derived based on the signal to interference plus noise ratio (SINR) of a cellular network consisting of one reference mobile user (MU), one reference base station (BS) and K interfering BSs. The proposed model is evaluated and it is found to be in very good agreement with simulation results.}, isbn = {978-981-287-990-5; 978-981-287-989-9}, issn = {1876-1100}, doi = {10.1007/978-981-287-990-5\_5}, author = {Elmaroud, Brahim and Faqihi, Ahmed and Abbad, Mohammed and Aboutajdine, Driss}, editor = {Sabir, E and Medromi, H and Sadik, M} } @article {Elmaroud201653, title = {Theoretical analysis of BER performance for asynchronous FBMC based multi-cellular networks with non linear distortions}, journal = {Lecture Notes in Electrical Engineering}, volume = {366}, year = {2016}, note = {cited By 0}, pages = {53-61}, abstract = {In this paper, we present a theoretical analysis of bit error rate (BER) for asynchronous filter bank multicarrier (FBMC) based multi-cellular networks in the presence of high power amplifier (HPA) nonlinear distortion (NLD). A promising class of FBMC modulation called Cosine Modulated Multitone (CMT) is considered and the analytical BER is derived based on the signal to interference plus noise ratio (SINR) of a cellular network consisting of one reference mobile user (MU), one reference base station (BS) and K interfering BSs. The proposed model is evaluated and it is found to be in very good agreement with simulation results.
{\textcopyright} Springer Science+Business Media Singapore 2016.}, doi = {10.1007/978-981-287-990-5_5}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959166330\&doi=10.1007\%2f978-981-287-990-5_5\&partnerID=40\&md5=0544540b48d8d8ec9e084a310f5f3bea}, author = {Elmaroud, B.a and Faqihi, A.a b and Abbad, M.a and Aboutajdine, D.a} } @conference { ISI:000376431400036, title = {Theoretical Analysis of SINR Performance for Unsynchronized and Nonlinearly Distorted FBMC Signals}, booktitle = {PROCEEDINGS OF THE MEDITERRANEAN CONFERENCE ON INFORMATION \& COMMUNICATION TECHNOLOGIES 2015, VOL 1}, series = {Lecture Notes in Electrical Engineering}, volume = {380}, year = {2016}, note = {Mediterranean Conference on Information and Communication Technologies (MedCT), MOROCCO, MAY 07-09, 2015}, pages = {345-353}, abstract = {In this paper, we present a theoretical analysis of the joint effect of carrier frequency offset (CFO) and high power amplifier (HPA) nonlinear distortion (NLD) on the signal to interference plus noise ratio (SINR) of filter bank multi-carrier (FBMC) systems. A promising class of FBMC modulation, called Cosine Modulated Multitone (CMT), is considered and the analytical SINR is derived in the presence of both HPA NLD and CFO. The simulation results have shown a good agreement with the theoretical analysis.}, isbn = {978-3-319-30301-7; 978-3-319-30299-7}, issn = {1876-1100}, doi = {10.1007/978-3-319-30301-7\_36}, author = {Elmaroud, Brahim and Faqihi, Ahmed and Abbad, Mohammed and Aboutajdine, Driss}, editor = {ElOualkadi, A and Choubani, F and ElMoussati, A} } @article {Elmaroud2016345, title = {Theoretical analysis of SINR performance for unsynchronized and nonlinearly distorted FBMC signals}, journal = {Lecture Notes in Electrical Engineering}, volume = {380}, year = {2016}, note = {cited By 0}, pages = {345-353}, abstract = {In this paper, we present a theoretical analysis of the joint effect of carrier frequency offset (CFO) and high power amplifier (HPA) nonlinear distortion (NLD) on the signal to interference plus noise ratio (SINR) of filter bank multicarrier (FBMC) systems. A promising class of FBMC modulation, called Cosine Modulated Multitone (CMT), is considered and the analytical SINR is derived in the presence of both HPA NLD and CFO. The simulation results have shown a good agreement with the theoretical analysis. {\textcopyright} Springer International Publishing Switzerland 2016.}, doi = {10.1007/978-3-319-30301-7_36}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964514091\&doi=10.1007\%2f978-3-319-30301-7_36\&partnerID=40\&md5=01dafa56c7f8076383e3c81b63f4afbc}, author = {Elmaroud, B.a and Faqihi, A.a b and Abbad, M.a and Aboutajdine, D.a} } @conference {Sara2016244, title = {Time aware recommendation}, booktitle = {Proceedings - 6th International Conference on Information and Communication Technology for the Muslim World, ICT4M 2016}, year = {2016}, note = {cited By 0}, pages = {244-247}, abstract = {The overload of information can become a significant challenge in relation to information retrieval systems. Often users will need to carry out extensive research to get the information they desire. This issue will only become more challenging as the quantity of data available on the internet increases. This increase shows no signs of slowing down and inevitably demands better solutions. One such solution proposed in this paper will look at the quality of the service discovery, such as adaptation customizing recommendation. 
In our project we considered ways to customize the contextual recommendation by creating a time awareness system. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICT4M.2016.75}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013052828\&doi=10.1109\%2fICT4M.2016.75\&partnerID=40\&md5=2f395fcbdfe337e52f75ed942f05dffe}, author = {Sara, A.a and El Bouzekri El Idrissi, Y.b and Ajhoun, R.a} } @conference { ISI:000385280000017, title = {Toward a Measurement Based E-Government Portals{\textquoteright} Benchmarking Framework}, booktitle = {PROCEEDINGS OF THE MEDITERRANEAN CONFERENCE ON INFORMATION \& COMMUNICATION TECHNOLOGIES 2015 (MEDCT 2015), VOL 2}, series = {Lecture Notes in Electrical Engineering}, volume = {381}, year = {2016}, note = {Mediterranean Conference on Information and Communication Technologies (MedCT), Saidia, MOROCCO, MAY 07-09, 2015}, pages = {161-169}, abstract = {E-government benchmarking is the process of classifying e-government according to agreed best practices or standards. It can help agencies enhance their portals{\textquoteright} quality by identifying the missing best practices, and providing guidelines to implement them. The aim of this paper is to introduce a benchmarking framework for e-government portals based on measurement of best practices. We have first identified and presented two examples of the benchmarking frameworks available in the literature. Based on the comparison conducted, the findings show that although the benchmarking frameworks are serving their intended purposes, they still suffer from some limitations. The paper also highlights how the new framework differs from the other frameworks and overcomes their limitations.}, isbn = {978-3-319-30298-0; 978-3-319-30296-6}, issn = {1876-1100}, doi = {10.1007/978-3-319-30298-0\_17}, author = {Fath-Allah, Abdoullah and Cheikhi, Laila and Al-Qutaish, Rafa E. and Idri, Ali}, editor = {ElOualkadi, A and Choubani, F and ElMoussati, A} } @article {Faqihi2016215, title = {Toward a new treatment approach of learning content in cloud era}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {83}, number = {2}, year = {2016}, note = {cited By 0}, pages = {215-226}, abstract = {Nowadays, the technological development of human beings is closely related to information. This is evident in the many areas of life that have been computerized, such as trade, government services, medicine, education and learning. Nevertheless, the fast development of information systems design has created several sub-systems in multiple contexts, conceived by different communities and geographically dispersed, yet all addressing the same area. Neither the contents nor the services of which these subsystems are made necessarily share the same technology environments. In our research, the learning field goes through many key steps. New practices have been introduced thanks to technological innovation, so the transition from classical learning towards distance learning (d-learning) is more than possible; it is desired. Consequently, this phenomenon has created more opportunities for learners and teachers but also several challenges; in many cases, the multitude of standards hinders learner migration from one learning environment to another, hampering learning development. In this paper, we propose a framework of interoperability based on three levels.
Since we are interested in semantic level, we propose a process of interoperability of learning content in the cloud era based on a global ontology. Like recommendation systems, we will start our process by acquisition, then validation and finally structuration of the learning content. This structuration way will give to both actors of learning environment a certain flexibility and access to other resources in Cloud environment. The basic principle is to collect content, to enrich it and to make it interoperable by using unified approach in star based on a comprehensive ontology. Our work is a part of MADAR project which is {\textquotedblleft}Learning Architecture Adapted to Mobile Technology{\textquotedblright}. {\textcopyright} 2005 - 2015 JATIT \& LLS. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84955166049\&partnerID=40\&md5=211c373530d79c2ab171cd1f8c4a6833}, author = {Faqihi, B. and Daoudi, N. and Ajhoun, R.} } @conference { ISI:000391420300010, title = {Towards a generic model of a user profile}, booktitle = {2016 11TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, series = {International Conference on Intelligent Systems, Theories and Applications}, year = {2016}, note = {11th International Conference on Intelligent Systems - Theories and Applications (SITA), Mohammedia, MOROCCO, OCT 19-20, 2016}, publisher = {IEEE; Univ Hassan Casablanca; IEEE Morocco Sect; Fac Sci Tech Mohammedia}, organization = {IEEE; Univ Hassan Casablanca; IEEE Morocco Sect; Fac Sci Tech Mohammedia}, abstract = {User modelling is an old research discipline. The main concern of this discipline is to improve the quality of human-computer interaction predictive goals, preferences and context. Thus, adaptation and personalization of a document or an application for a particular user need to have information on the latter. It often referred to as {\textquoteleft}{\textquoteleft}user profile{{\textquoteright}{\textquoteright}}. A user profile modelling process must be done in two stages. These can be expressed by two questions: (1) {\textquoteleft}{\textquoteleft}what data?{{\textquoteright}{\textquoteright}} and (2) {\textquoteleft}{\textquoteleft}In what form will they be organized?{{\textquoteright}{\textquoteright}} The answer to the first question will determine all relevant information that best represents the interests and needs of the user. As for the second, it will determine the logical structure in which a profile will be modelled. This paper aims to provide answers to both questions. To do this, we begin with a presentation of different areas where the user profile can make a major contribution. In the second step, we define all the information to be included in the user profile as well as a generic model that we can adapt to different areas.}, isbn = {978-1-5090-5781-8}, issn = {2378-2528}, author = {Anter, Samir and El Yazidi, Mly Hafid and Zellou, Ahmed and Idri, Ali} } @conference {Anter2016, title = {Towards a generic model of a user profile}, booktitle = {SITA 2016 - 11th International Conference on Intelligent Systems: Theories and Applications}, year = {2016}, note = {cited By 0}, abstract = {User modelling is an old research discipline. The main concern of this discipline is to improve the quality of human-computer interaction predictive goals, preferences and context. Thus, adaptation and personalization of a document or an application for a particular user need to have information on the latter. 
It often referred to as {\textquoteright}user profile{\textquoteright}. A user profile modelling process must be done in two stages. These can be expressed by two questions: (1) {\textquoteright}what data?{\textquoteright} and (2) {\textquoteright}In what form will they be organized?{\textquoteright} The answer to the first question will determine all relevant information that best represents the interests and needs of the user. As for the second, it will determine the logical structure in which a profile will be modelled. This paper aims to provide answers to both questions. To do this, we begin with a presentation of different areas where the user profile can make a major contribution. In the second step, we define all the information to be included in the user profile as well as a generic model that we can adapt to different areas. {\textcopyright} 2016 IEEE.}, doi = {10.1109/SITA.2016.7772265}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010301085\&doi=10.1109\%2fSITA.2016.7772265\&partnerID=40\&md5=2f2efa11599627d6b4c5ad967869851f}, author = {Anter, S. and Yazidi, M.H.E. and Zellou, A. and Idri, A.} } @conference {Chabibi2016, title = {Towards an alignment of SysML and simulation tools}, booktitle = {Proceedings of IEEE/ACS International Conference on Computer Systems and Applications, AICCSA}, volume = {2016-July}, year = {2016}, note = {cited By 1}, abstract = {Even if it is considered as an effective language for system modeling because of the descriptive aspect of its diagrams, SysML (System Modeling Language) is insufficient for verification of their behavior. This lack is accentuated by the increasing complexity of recent systems. In order to conduct behavior verifications, designers use simulation tools to realize experiments on the studied system. Thus, the efficiency of the engineering process is often reduced because of the separate and consecutive use of both SysML modeling and simulation tools. As a consequence, various research works focused on unifying the potential provided by the SysML language and simulation environments. We propose in this paper to study links taxonomy between SysML and various existing simulation environments. The ultimate goal of this study is to consider the most optimal passage from SysML to various simulation tools. A common environment based on models and modern techniques of model-based engineering will handle this transformation. {\textcopyright} 2015 IEEE.}, doi = {10.1109/AICCSA.2015.7507216}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84980349948\&doi=10.1109\%2fAICCSA.2015.7507216\&partnerID=40\&md5=963c125b0daeba5c1d58c329f95e4ef1}, author = {Chabibi, B.a and Anwar, A.b and Nassar, M.a} } @conference { ISI:000390888900008, title = {Towards an Efficient Policy Evaluation Process in Multi-Tenancy Cloud Environments}, booktitle = {CCSW{\textquoteright}16: PROCEEDINGS OF THE 2016 ACM CLOUD COMPUTING SECURITY WORKSHOP}, year = {2016}, note = {8th ACM Cloud Computing Security Workshop (CCSW), Vienna, AUSTRIA, OCT 28, 2016}, pages = {55-59}, publisher = {ACM SIGSAC; ACM}, organization = {ACM SIGSAC; ACM}, abstract = {Cloud computing offers most of its services under multi-tenancy environments. To satisfy security requirements among collaborating tenants, each tenant may define a set of access control policies to secure access to shared data. Several cloud solutions make use of XACML to specify such policies. 
However, existing implementations of XACML perform a brute force search to compare a request to all existing rules in a given XACML policy. This decreases the decision process (i.e., policy evaluation) performance especially for policies with a large number of rules. In this paper, we propose an automata-based approach for an efficient XACML policy evaluation. We implemented our approach in a cloud policy engine called X2Automata. The engine first converts both XACML policies and access requests to automata. Second, it combines the two automata by a synchronous product. Third, it applies an evaluation procedure to the resulting automaton to decide whether an access request is granted or not. To highlight the efficiency of X2Automata, we compare its performance, based on the OpenStack cloud environment, with the XACML implementation named Balana.}, isbn = {978-1-4503-4572-9}, doi = {10.1145/2996429.2996431}, author = {Ayache, Meryeme and Erradi, Mohammed and Freisleben, Bernd and Khoumsi, Ahmed} } @conference {Ayache201655, title = {Towards an efficient policy evaluation process in multi-tenancy cloud environments}, booktitle = {CCSW 2016 - Proceedings of the 2016 ACM Cloud Computing Security Workshop, co-located with CCS 2016}, year = {2016}, note = {cited By 0}, pages = {55-59}, abstract = {Cloud computing offers most of its services under multi-tenancy environments. To satisfy security requirements among collaborating tenants, each tenant may define a set of access control policies to secure access to shared data. Several cloud solutions make use of XACML to specify such policies. However, existing implementations of XACML perform a brute force search to compare a request to all existing rules in a given XACML policy. This decreases the decision process (i.e., policy evaluation) performance especially for policies with a large number of rules. In this paper, we propose an automata-based approach for an efficient XACML policy evaluation. We implemented our approach in a cloud policy engine called X2Automata. The engine first converts both XACML policies and access requests to automata. Second, it combines the two automata by a synchronous product. Third, it applies an evaluation procedure to the resulting automaton to decide whether an access request is granted or not. To highlight the efficiency of X2Automata, we compare its performance, based on the OpenStack cloud environment, with the XACML implementation named Balana. {\textcopyright} 2016 ACM.}, doi = {10.1145/2996429.2996431}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85001655008\&doi=10.1145\%2f2996429.2996431\&partnerID=40\&md5=8259d7215d88c0a1a7a94d837b9564a3}, author = {Ayache, M.a and Erradi, M.a and Freisleben, B.b and Khoumsi, A.c} } @conference {ElBassiti201699, title = {Towards innovation excellence: Why and how to measure innovation performance?}, booktitle = {Proceedings - 6th International Conference on Information and Communication Technology for the Muslim World, ICT4M 2016}, year = {2016}, note = {cited By 0}, pages = {99-104}, abstract = {To maintain competitive advantage, today organizations need to be able to innovate - not just occasionally, but consistently. Mastering the process of innovation requires identifying the factors that support or hamper the achievement of innovations. The success of such process usually depends on the quality of the best opportunity identified, which is not enough. 
So, a systematic research and delivery framework spawning a set of performance measurements and improvement metrics is required, because, what is not measurable cannot be neither managed nor improved. This paper identifies three complementary components specifically developed to enable such measurement. First, Innovation Granularity Scales enabling highly targeted yet flexible performance analysis ranging from knowledge assessment to high level progressions and improvements; Second, Innovation Capability Stages referring to the minimum capabilities required by transformational milestones along the innovation continuum; Third, Innovation Maturity Levels representing the quality, predictability and performance within the innovation stages. This paper explores these complementary components and presents them as a systematic model underlying a specified innovation performance measurement framework. {\textcopyright} 2016 IEEE.}, doi = {10.1109/ICT4M.2016.73}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013042290\&doi=10.1109\%2fICT4M.2016.73\&partnerID=40\&md5=b94e0c3f51b5c6c4d3a7825a33a2bc8e}, author = {El Bassiti, L. and Ajhoun, R.} } @article { ISI:000375706200001, title = {Usability evaluation of mobile applications using ISO 9241 and ISO 25062 standards}, journal = {SPRINGERPLUS}, volume = {5}, year = {2016}, month = {APR 29}, abstract = {This paper presents an empirical study based on a set of measures to evaluate the usability of mobile applications running on different mobile operating systems, including Android, iOS and Symbian. The aim is to evaluate empirically a framework that we have developed on the use of the Software Quality Standard ISO 9126 in mobile environments, especially the usability characteristic. To do that, 32 users had participated in the experiment and we have used ISO 25062 and ISO 9241 standards for objective measures by working with two widely used mobile applications: Google Apps and Google Maps. The QUIS 7.0 questionnaire have been used to collect measures assessing the users{\textquoteright} level of satisfaction when using these two mobile applications. By analyzing the results we highlighted a set of mobile usability issues that are related to the hardware as well as to the software and that need to be taken into account by designers and developers in order to improve the usability of mobile applications.}, issn = {2193-1801}, doi = {10.1186/s40064-016-2171-z}, author = {Moumane, Karima and Idri, Ali and Abran, Alain} } @article {Moumane2016, title = {Usability evaluation of mobile applications using ISO 9241 and ISO 25062 standards}, journal = {SpringerPlus}, volume = {5}, number = {1}, year = {2016}, note = {cited By 3}, abstract = {This paper presents an empirical study based on a set of measures to evaluate the usability of mobile applications running on different mobile operating systems, including Android, iOS and Symbian. The aim is to evaluate empirically a framework that we have developed on the use of the Software Quality Standard ISO 9126 in mobile environments, especially the usability characteristic. To do that, 32 users had participated in the experiment and we have used ISO 25062 and ISO 9241 standards for objective measures by working with two widely used mobile applications: Google Apps and Google Maps. The QUIS 7.0 questionnaire have been used to collect measures assessing the users{\textquoteright} level of satisfaction when using these two mobile applications. 
By analyzing the results we highlighted a set of mobile usability issues that are related to the hardware as well as to the software and that need to be taken into account by designers and developers in order to improve the usability of mobile applications. {\textcopyright} 2016, Moumane et al.}, doi = {10.1186/s40064-016-2171-z}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964771111\&doi=10.1186\%2fs40064-016-2171-z\&partnerID=40\&md5=a8cf9ebe6e65c0ea9863ef6d1ce8bca8}, author = {Moumane, K.a and Idri, A.a and Abran, A.b} } @conference { ISI:000391354500075, title = {UWB Thin Film Flexible Antenna for Microwave Thermography for Breast Cancer Detection}, booktitle = {2016 INTERNATIONAL CONFERENCE ON ELECTRICAL AND INFORMATION TECHNOLOGIES (ICEIT)}, year = {2016}, note = {2nd International Conference on Electrical and Information Technologies (ICEIT), Tangier, MOROCCO, MAY 04-07, 2016}, pages = {425-429}, abstract = {A novel low-cost flexible antenna is presented for a non-invasive and highly sensitive method to detect malignant tumors in the early stages, contributing to diminishing the mortality that occurs when breast cancer is detected at late, incurable stages. Using a thin film Kapton substrate, the flexible antenna has been designed and simulated with CST Microwave Studio software. The proposed structure operates in S-band (with a reflection coefficient (S11) below -10 dB) and has an ultra-wide band (UWB) characteristic providing an important bandwidth of about 2.4 GHz with a center frequency around 3 GHz. In order to predict the effect of the human body on the S-parameter response of the developed antenna design, an inhomogeneous multi-layer model of the female breast is used. The simulated results indicate that the proposed antenna keeps its UWB property. The overall size of the miniaturized antenna is 20 mm x 22 mm, which makes it very suitable for a radar-based breast cancer detection system.}, isbn = {978-1-4673-8469-8}, author = {Afyf, Amal and Bellarbi, Larbi and Achour, Anouar and Yaakoubi, Nourdin and Errachid, Abdelhamid and Sennouni, M. Adel}, editor = {Essaaidi, M and ElHani, S} } @conference {Afyf2016425, title = {UWB thin film flexible antenna for microwave thermography for breast cancer detection}, booktitle = {Proceedings of 2016 International Conference on Electrical and Information Technologies, ICEIT 2016}, year = {2016}, note = {cited By 0}, pages = {425-429}, abstract = {A novel low-cost flexible antenna is presented for a non-invasive and highly sensitive method to detect malignant tumors in the early stages, contributing to diminishing the mortality that occurs when breast cancer is detected at late, incurable stages. Using a thin film Kapton substrate, the flexible antenna has been designed and simulated with CST Microwave Studio software. The proposed structure operates in S-band (with a reflection coefficient (S11) below -10 dB) and has an ultra-wide band (UWB) characteristic providing an important bandwidth of about 2.4 GHz with a center frequency around 3 GHz. In order to predict the effect of the human body on the S-parameter response of the developed antenna design, an inhomogeneous multi-layer model of the female breast is used. The simulated results indicate that the proposed antenna keeps its UWB property. The overall size of the miniaturized antenna is 20 mm {\texttimes} 22 mm, which makes it very suitable for a radar-based breast cancer detection system.
{\textcopyright} 2016 IEEE.}, doi = {10.1109/EITech.2016.7519635}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84992195805\&doi=10.1109\%2fEITech.2016.7519635\&partnerID=40\&md5=348267fc2a9e46d247350579e616bbe7}, author = {Afyf, A.a and Bellarbi, L.a and Achour, A.a and Yaakoubi, N.b and Errachid, A.c and Adel Sennouni, M.d} } @conference { ISI:000386649000017, title = {A Validation of a Measurement Based E-government Portals{\textquoteright} Maturity Model}, booktitle = {2016 42ND EUROMICRO CONFERENCE ON SOFTWARE ENGINEERING AND ADVANCED APPLICATIONS (SEAA)}, year = {2016}, note = {42nd Euromicro Conference Series on Software Engineering and Advanced Applications (SEAA), Limassol, CYPRUS, AUG 31-SEP 02, 2016}, pages = {100-107}, publisher = {Univ Cyprus; Technolog Educ Inst Western Greece}, organization = {Univ Cyprus; Technolog Educ Inst Western Greece}, abstract = {An e-government portal{\textquoteright}s maturity model is a set of stages (from basic to advanced ones) that determines the maturity of e-government portals. In fact, these models can be used to provide directions and recommendations for agencies to improve their portals{\textquoteright} maturity. However, before choosing a maturity model by any agency, it is important to know to which extent the e-government community agree or disagree with the model. In previous research studies, we have built an e-government portals{\textquoteright} maturity model that is based on a best practice model. The aim of this paper is to validate this new model by e-government experts using a survey to prove that the model is valid and reliable. For this purpose, we have described the components of this model, and the previous work that has been done to build it. Based on the results of the survey, our findings show that the new model has proven its validity.}, isbn = {978-1-5090-2819-1}, doi = {10.1109/SEAA.2016.38}, author = {Fath-Allah, Abdoullah and Cheikhi, Laila and Al-Qutaish, Rafa E. and Idri, Ali} } @conference { ISI:000391354500008, title = {Virtualization in Cloud Computing: NoHype vs HyperWall}, booktitle = {2016 INTERNATIONAL CONFERENCE ON ELECTRICAL AND INFORMATION TECHNOLOGIES (ICEIT)}, year = {2016}, note = {2nd International Conference on Electrical and Information Technologies (ICEIT), Tangier, MOROCCO, MAY 04-07, 2016}, pages = {49-54}, abstract = {In the world of Cloud Computing, virtualization and virtual environments are fundamental basics for data sharing. It provides the guest user with the elements needed to execute his request, while it gives the provider the ability to be housing different guests without risking the security and integrity of data. Virtualization is based on a central component, called a hypervisor, having extra-privileges, which makes it the key component capable of managing the sharing of data and resources. 
In this paper, we will be analyzing different approaches which try to solve the security issues of hypervisors, and we will try to add our own contribution to help solve the security problem of hypervisor-based architectures.}, isbn = {978-1-4673-8469-8}, author = {Alouane, Meryeme and El Bakkali, Hanan}, editor = {Essaaidi, M and ElHani, S} } @conference {Alouane201649, title = {Virtualization in Cloud Computing: NoHype vs HyperWall new approach}, booktitle = {Proceedings of 2016 International Conference on Electrical and Information Technologies, ICEIT 2016}, year = {2016}, note = {cited By 0}, pages = {49-54}, abstract = {In the world of Cloud Computing, virtualization and virtual environments are fundamental basics for data sharing. It provides the guest user with the elements needed to execute his request, while it gives the provider the ability to host different guests without risking the security and integrity of data. Virtualization is based on a central component, called a hypervisor, with extra privileges, which makes it the key component capable of managing the sharing of data and resources. In this paper, we will be analyzing different approaches which try to solve the security issues of hypervisors, and we will try to add our own contribution to help solve the security problem of hypervisor-based architectures. {\textcopyright} 2016 IEEE.}, doi = {10.1109/EITech.2016.7519629}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84992192250\&doi=10.1109\%2fEITech.2016.7519629\&partnerID=40\&md5=544c725260cc9a240cc98001c3225484}, author = {Alouane, M. and El Bakkali, H.} } @conference { ISI:000392265300025, title = {Who Can Help to Review This Piece of Code?}, booktitle = {COLLABORATION IN A HYPERCONNECTED WORLD}, series = {IFIP Advances in Information and Communication Technology}, volume = {480}, year = {2016}, note = {17th IFIP WG 5.5 Working Conference on Virtual Enterprises (PRO-VE), Porto, PORTUGAL, OCT 03-05, 2016}, pages = {289-301}, publisher = {IFIP WG 5 5 Co Operat Infrastructure Virtual Enterprises \& Elect Business; Soc Collaborat Networks; U Porto; INESCTEC; Univ Amsterdam; Nova Univ Lisbon; UNINOVA}, organization = {IFIP WG 5 5 Co Operat Infrastructure Virtual Enterprises \& Elect Business; Soc Collaborat Networks; U Porto; INESCTEC; Univ Amsterdam; Nova Univ Lisbon; UNINOVA}, abstract = {Successful software projects require collaboration between team members. Efficient collaboration relies on both technical and social linkages. In this paper, we investigate whether a socio-technical analysis can support software contributors in identifying experts helping to review their source code. We mined the histories of five open source projects (OSS) from GitHub and examined both technical and socio-technical interactions based on Social Network Analysis (SNA). Mapping the communication network to the file co-edition network shows the existence of collaboration patterns between core teams and peripherals in the studied OSS projects. Our main contribution is the construction and mapping of three sources of social networks, in which contributors interact by co-editing, commenting or reviewing. We were able to identify behavioral patterns between core teams and peripherals related to the activity of code review.
Our findings have implications for improving collaboration between contributors within virtual OSS communities, which drives teams{\textquoteright} performance and software product quality.}, isbn = {978-3-319-45390-3; 978-3-319-45389-7}, issn = {1868-4238}, doi = {10.1007/978-3-319-45390-3\_25}, author = {Kerzazi, Noureddine and El Asri, Ikram}, editor = {Afsarmanesh, H and CamarinhaMatos, LM and Soares, AL} } @article {Kerzazi2016289, title = {Who can help to review this piece of code?}, journal = {IFIP Advances in Information and Communication Technology}, volume = {480}, year = {2016}, note = {cited By 0}, pages = {289-301}, abstract = {Successful software projects require collaboration between team members. Efficient collaboration relies on both technical and social linkages. In this paper, we investigate whether a socio-technical analysis can support software contributors in identifying experts helping to review their source code. We mined the histories of five open source projects (OSS) from GitHub and examined both technical and socio-technical interactions based on Social Network Analysis (SNA). Mapping the communication network to the file co-edition network shows the existence of collaboration patterns between core teams and peripherals in the studied OSS projects. Our main contribution is the construction and mapping of three sources of social networks, in which contributors interact by co-editing, commenting or reviewing. We were able to identify behavioral patterns between core teams and peripherals related to the activity of code review. Our findings have implications for improving collaboration between contributors within virtual OSS communities, which drives teams{\textquoteright} performance and software product quality. {\textcopyright} IFIP International Federation for Information Processing 2016.}, doi = {10.1007/978-3-319-45390-3_25}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84989818867\&doi=10.1007\%2f978-3-319-45390-3_25\&partnerID=40\&md5=b8f6fddd6f538b75db5950c892ceceb0}, author = {Kerzazi, N. and El Asri, I.} } @conference { ISI:000382224200013, title = {Who Needs Release and DevOps Engineers, and Why?}, booktitle = {INTERNATIONAL WORKSHOP ON CONTINUOUS SOFTWARE EVOLUTION AND DELIVERY, CSED 2016}, year = {2016}, note = {1st International Workshop on Continuous Software Evolution and Delivery (CSED), Austin, TX, MAY 14-15, 2016}, pages = {77-83}, publisher = {Assoc Comp Machinery; IEEE Comp Soc; IEEE Tech Council Software Engn; ACM Special Interest Grp Software Engn}, organization = {Assoc Comp Machinery; IEEE Comp Soc; IEEE Tech Council Software Engn; ACM Special Interest Grp Software Engn}, abstract = {The recent surge in interest in continuous delivery has opened up the job market for release and DevOps engineers. However, despite an increasing number of conferences and publications on continuous delivery, smaller companies and start-ups still have a hard time determining the core tasks their future release and DevOps engineers should be responsible for (and what the differences between those two roles are), while universities are not sure what essential techniques and skills they should teach to their students. This paper performs an empirical analysis of online job postings to determine and compare the main tasks of release and DevOps engineers, globally and across countries.
Our qualitative analysis shows that automation is the most important activity across the three roles, as articulated in job posting description data, and that the release engineer role combines the top activities of the DevOps and more traditional build engineer roles. Finally, different countries have a moderate degree of similarity between their ads, although each country has its specific focus.}, isbn = {978-1-4503-4157-8}, doi = {10.1145/2896941.2896957}, author = {Kerzazi, Noureddine and Adams, Bram} } @conference {Kerzazi201677, title = {Who needs release and DevOps engineers, and why?}, booktitle = {Proceedings - International Workshop on Continuous Software Evolution and Delivery, CSED 2016}, year = {2016}, note = {cited By 1}, pages = {77-83}, abstract = {The recent surge in interest in continuous delivery has opened up the job market for release and DevOps engineers. However, despite an increasing number of conferences and publications on continuous delivery, smaller companies and startups still have a hard time determining the core tasks their future release and DevOps engineers should be responsible for (and what the differences between those two roles are), while universities are not sure what essential techniques and skills they should teach to their students. This paper performs an empirical analysis of online job postings to determine and compare the main tasks of release and DevOps engineers, globally and across countries. Our qualitative analysis shows that automation is the most important activity across the three roles, as articulated in job posting description data, and that the release engineer role combines the top activities of the DevOps and more traditional build engineer roles. Finally, different countries have a moderate degree of similarity between their ads, although each country has its specific focus. {\textcopyright} 2016 ACM.}, doi = {10.1145/2896941.2896957}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84984645277\&doi=10.1145\%2f2896941.2896957\&partnerID=40\&md5=2c7371a5581be3d091448050ed74c353}, author = {Kerzazi, N.a and Adams, B.b} } @conference {Ayache201526, title = {Access control policies enforcement in a cloud environment: Openstack}, booktitle = {Proceedings of the 2015 11th International Conference on Information Assurance and Security, IAS 2015}, year = {2015}, note = {cited By 1}, pages = {26-31}, abstract = {Cloud computing has become a widely used paradigm in many IT domains such as e-health. It offers several advantages to the users, e.g. elasticity, flexibility and the rapid sharing of a huge set of digital data. However, many security and privacy concerns still pose significant challenges. In particular, the most identified problem is how to enforce the user{\textquoteright}s security policy in the access control of the outsourced data. In fact, cloud environments does not provide facilities to support high level defined security policies. For instance, the swift storage component of openstack supports only fine grained access control to execute a specific action on a specific defined object. In this paper, we designed and implemented a middleware to provide high level security policies while using such swift fine grained primitives. An e-health collaborative application dedicated for remote diagnosis is used to illustrate the suggested approach. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/ISIAS.2015.7492740}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84979530321\&doi=10.1109\%2fISIAS.2015.7492740\&partnerID=40\&md5=bc070d8bb2973f9f221582272c86a588}, author = {Ayache, M.a and Erradi, M.a and Freisleben, B.b} } @article {Idri2015, title = {Accuracy Comparison of Analogy-Based Software Development Effort Estimation Techniques}, journal = {International Journal of Intelligent Systems}, year = {2015}, note = {cited By 1; Article in Press}, abstract = {Estimation by analogy is a commonly used software effort estimation technique and a suitable alternative to other conventional estimation techniques: It predicts the effort of the target project using information from former similar projects. While it is relatively easy to handle numerical attributes, dealing with categorical attributes is one of the most difficult issues for analogy-based estimation techniques. Therefore, we propose, in this paper, a novel analogy-based approach, called 2FA-kprototypes, to predict effort when software projects are described by a mix of numerical and categorical attributes. To this aim, the well-known fuzzy k-prototypes algorithm is integrated into the process of estimation by analogy. The estimation accuracy of 2FA-kprototypes was evaluated and compared with that of two techniques: (1) classical analogy-based technique and (2) 2FA-kmodes, which is a technique that we have developed recently. The comparison was performed using four data sets that are quite diverse and have different sizes: ISBSG, COCOMO, USP05-FT, and USP05-RQ. The results obtained showed that both 2FA-kprototypes and 2FA-kmodes perform better than classical analogy. {\textcopyright} 2015 Wiley Periodicals, Inc.}, doi = {10.1002/int.21748}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84938886103\&doi=10.1002\%2fint.21748\&partnerID=40\&md5=d6d3bf98e8678fff671b7bc18b1febd5}, author = {Idri, A.a and Amazal, F.A.a and Abran, A.b} } @conference { ISI:000383221800012, title = {ACTOR NETWORK THEORY AS A COLLABORATIVE MODE: THE CONTRIBUTION OF GAME THEORY IN THE INTERESSEMENT PHASE}, booktitle = {PROCEEDINGS OF THE 2015 5TH WORLD CONGRESS ON INFORMATION AND COMMUNICATION TECHNOLOGIES (WICT)}, year = {2015}, note = {5th World Congress on Information and Communication Technologies (WICT), Marrakesh, MOROCCO, DEC 14-16, 2015}, pages = {67-72}, publisher = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; lab Res Grp Intelligent Machines; MIR Labs}, organization = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; lab Res Grp Intelligent Machines; MIR Labs}, abstract = {Global Governance of projects requires cooperation between several actors. In general, such cooperation is ensured by building network collaboration between entities who want to collaborate. In spite of the existence of a number of works interested in collaboration network, only few of them were focused on how to construct a network. In this paper, we address this topic through Actor Network Theory. In particular way, we analyze interessement phase of ANT from a cooperative game point of view. Indeed, it{\textquoteright}s about negotiations between actors involved in business project. Our objective is to propose an approach of network establishment, by inciting actors through cost savings. 
For that, we use Shapley Value to answer the question: Which coalitions are likely to form in order to ensure best cost-saving objectives in ANT mode of collaboration? We propose also a graphical tool for visualizing networks and simulating their evolution.}, isbn = {978-1-4673-8712-5}, author = {Benqatla, Mohammed Salim and Dikra, Chikhaoui and Bounabat, Bouchaib}, editor = {Abrahim, A and Alimi, AM and Haqiq, A and Karray, H and Mousannif, H and BenHalima, M and Choo, YH and Ma, K} } @article {Lhoussain2015127, title = {Adaptating the levenshtein distance to contextual spelling correction}, journal = {International Journal of Computer Science and Applications}, volume = {12}, number = {1}, year = {2015}, note = {cited By 1}, pages = {127-133}, abstract = {In the last few years, computing environments for human learning have rapidly evolved due to the development of information and communication technologies. However, the use of information technology in automatic correction of spelling errors has become increasingly essential. In this context, we have developed a system for correcting spelling errors in the Arabic language based on language models and Levenshtein algorithm. The metric distance returned by the Levenshtein algorithm is often the same for multiple solutions in correcting a wrong word. To overcome this limitation we have added a weighting based on language models. This combination has helped us to screen and refine the results obtained in advance by the Levenshtein algorithm, and applied to the errors of Arabic words. The results are encouraging and demonstrate the value of this approach. {\textcopyright} Technomathematics Research Foundation.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84925438223\&partnerID=40\&md5=d40d9b32ebd59c63f8fed1f88ff15e47}, author = {Lhoussain, A.S.a and Hicham, G.b and Abdellah, Y.c} } @article {Idri2015206, title = {Analogy-based software development effort estimation: A systematic mapping and review}, journal = {Information and Software Technology}, volume = {58}, year = {2015}, note = {cited By 16}, pages = {206-230}, abstract = {Context: Analogy-based Software development Effort Estimation (ASEE) techniques have gained considerable attention from the software engineering community. However, existing systematic map and review studies on software development effort prediction have not investigated in depth several issues of ASEE techniques, to the exception of comparisons with other types of estimation techniques. Objective: The objective of this research is twofold: (1) to classify ASEE studies which primary goal is to propose new or modified ASEE techniques according to five criteria: research approach, contribution type, techniques used in combination with ASEE methods, and ASEE steps, as well as identifying publication channels and trends and (2) to analyze these studies from five perspectives: estimation accuracy, accuracy comparison, estimation context, impact of the techniques used in combination with ASEE methods, and ASEE tools. Method: We performed a systematic mapping of studies for which the primary goal is to develop or to improve ASEE techniques published in the period 1990-2012, and reviewed them based on an automated search of four electronic databases. Results: In total, we identified 65 studies published between 1990 and 2012, and classified them based on our predefined classification criteria. 
The mapping study revealed that most researchers focus on addressing problems related to the first step of an ASEE process, that is, feature and case subset selection. The results of our detailed analysis show that ASEE methods outperform the eight techniques with which they were compared, and tend to yield acceptable results especially when combining ASEE techniques with Fuzzy Logic (FL) or Genetic Algorithms (GA). Conclusion: Based on the findings of this study, the use of other techniques such FL and GA in combination with an ASEE method is promising to generate more accurate estimates. However, the use of ASEE techniques by practitioners is still limited: developing more ASEE tools may facilitate the application of these techniques and then lead to increasing the use of ASEE techniques in industry. {\textcopyright} 2014 Elsevier B.V. All rights reserved.}, doi = {10.1016/j.infsof.2014.07.013}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84914109039\&doi=10.1016\%2fj.infsof.2014.07.013\&partnerID=40\&md5=70ffdeca252a6c01dde0070da6ecde9c}, author = {Idri, A.a and Amazal, F.A.a and Abran, A.b} } @article { ISI:000347022800012, title = {Analogy-based software development effort estimation: A systematic mapping and review}, journal = {INFORMATION AND SOFTWARE TECHNOLOGY}, volume = {58}, year = {2015}, month = {FEB}, pages = {206-230}, abstract = {Context: Analogy-based Software development Effort Estimation (ASEE) techniques have gained considerable attention from the software engineering community. However, existing systematic map and review studies on software development effort prediction have not investigated in depth several issues of ASEE techniques, to the exception of comparisons with other types of estimation techniques. Objective: The objective of this research is twofold: (1) to classify ASEE studies which primary goal is to propose new or modified ASEE techniques according to five criteria: research approach, contribution type, techniques used in combination with ASEE methods, and ASEE steps, as well as identifying publication channels and trends and (2) to analyze these studies from five perspectives: estimation accuracy, accuracy comparison, estimation context, impact of the techniques used in combination with ASEE methods, and ASEE tools. Method: We performed a systematic mapping of studies for which the primary goal is to develop or to improve ASEE techniques published in the period 1990-2012, and reviewed them based on an automated search of four electronic databases. Results: In total, we identified 65 studies published between 1990 and 2012, and classified them based on our predefined classification criteria. The mapping study revealed that most researchers focus on addressing problems related to the first step of an ASEE process, that is, feature and case subset selection. The results of our detailed analysis show that ASEE methods outperform the eight techniques with which they were compared, and tend to yield acceptable results especially when combining ASEE techniques with Fuzzy Logic (FL) or Genetic Algorithms (GA). Conclusion: Based on the findings of this study, the use of other techniques such FL and GA in combination with an ASEE method is promising to generate more accurate estimates. However, the use of ASEE techniques by practitioners is still limited: developing more ASEE tools may facilitate the application of these techniques and then lead to increasing the use of ASEE techniques in industry. (C) 2014 Elsevier B.V. 
All rights reserved.}, issn = {0950-5849}, doi = {10.1016/j.infsof.2014.07.013}, author = {Idri, Ali and Amazal, Fatima Azzahra and Abran, Alain} } @article {Assad2015, title = {Analysis of the deployment quality for intrusion detection in wireless sensor networks}, journal = {Journal of Computer Networks and Communications}, volume = {2015}, year = {2015}, note = {cited By 0}, abstract = {The intrusion detection application in a homogeneous wireless sensor network is defined as a mechanism to detect unauthorized intrusions or anomalous moving attackers in a field of interest. The quality of deterministic sensor nodes deployment can be determined sufficiently by a rigorous analysis before the deployment. However, when random deployment is required, determining the deployment quality becomes challenging. An area may require that multiple nodes monitor each point from the sensing area; this constraint is known as k-coverage where k is the number of nodes. The deployment quality of sensor nodes depends directly on node density and sensing range; mainly a random sensor nodes deployment is required. The major question is centred around the problem of network coverage, how can we guarantee that each point of the sensing area is covered by the required number of sensor nodes and what a sufficient condition to guarantee the network coverage? To deal with this, probabilistic intrusion detection models are adopted, called single/multi-sensing detection, and the deployment quality issue is surveyed and analysed in terms of coverage. We evaluate the capability of our probabilistic model in homogeneous wireless sensor network, in terms of sensing range, node density, and intrusion distance. {\textcopyright} 2015 Noureddine Assad et al.}, doi = {10.1155/2015/812613}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84923884883\&doi=10.1155\%2f2015\%2f812613\&partnerID=40\&md5=ad555667d0db7cbc297aef916a17e0f5}, author = {Assad, N.a and Elbhiri, B.b and Faqihi, M.A.c and Ouadou, M.a and Aboutajdine, D.a} } @article {10906318320150205, title = {Analysis of the Deployment Quality for Intrusion Detection in Wireless Sensor Networks.}, journal = {Journal of Computer Networks \& Communications}, volume = {2015}, year = {2015}, pages = {1 - 7}, abstract = {The intrusion detection application in a homogeneous wireless sensor network is defined as a mechanism to detect unauthorized intrusions or anomalous moving attackers in a field of interest. The quality of deterministic sensor nodes deployment can be determined sufficiently by a rigorous analysis before the deployment. However, when random deployment is required, determining the deployment quality becomes challenging. An area may require that multiple nodes monitor each point from the sensing area; this constraint is known as k-coverage where k is the number of nodes. The deployment quality of sensor nodes depends directly on node density and sensing range; mainly a random sensor nodes deployment is required. The major question is centred around the problem of network coverage, how can we guarantee that each point of the sensing area is covered by the required number of sensor nodes and what a sufficient condition to guarantee the network coverage? 
To deal with this, probabilistic intrusion detection models are adopted, called single/multi-sensing detection, and the deployment quality issue is surveyed and analysed in terms of coverage. We evaluate the capability of our probabilistic model in homogeneous wireless sensor network, in terms of sensing range, node density, and intrusion distance.}, keywords = {Intrusion detection systems (Computer security), Mathematical models, Probability theory, Wireless sensor networks, Wireless sensor nodes}, issn = {20907141}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=109063183\&site=ehost-live}, author = {Assad, Noureddine and Elbhiri, Brahim and Faqihi, Moulay Ahmed and Ouadou, Mohamed and Aboutajdine, Driss} } @article { ISI:000362267900001, title = {Analysis of the Deployment Quality for Intrusion Detection in Wireless Sensor Networks}, journal = {JOURNAL OF COMPUTER NETWORKS AND COMMUNICATIONS}, year = {2015}, abstract = {The intrusion detection application in a homogeneous wireless sensor network is defined as a mechanism to detect unauthorized intrusions or anomalous moving attackers in a field of interest. The quality of deterministic sensor nodes deployment can be determined sufficiently by a rigorous analysis before the deployment. However, when random deployment is required, determining the deployment quality becomes challenging. An area may require that multiple nodes monitor each point from the sensing area; this constraint is known as k-coverage where k is the number of nodes. The deployment quality of sensor nodes depends directly on node density and sensing range; mainly a random sensor nodes deployment is required. The major question is centred around the problem of network coverage, how can we guarantee that each point of the sensing area is covered by the required number of sensor nodes and what a sufficient condition to guarantee the network coverage? To deal with this, probabilistic intrusion detection models are adopted, called single/multi-sensing detection, and the deployment quality issue is surveyed and analysed in terms of coverage. We evaluate the capability of our probabilistic model in homogeneous wireless sensor network, in terms of sensing range, node density, and intrusion distance.}, issn = {2090-7141}, doi = {10.1155/2015/812613}, author = {Assad, Noureddine and Elbhiri, Brahim and Faqihi, Moulay Ahmed and Ouadou, Mohamed and Aboutajdine, Driss} } @article {Benlarabi2015550, title = {Analyzing trends in software product lines evolution using a cladistics based approach}, journal = {Information (Switzerland)}, volume = {6}, number = {3}, year = {2015}, note = {cited By 1}, pages = {550-563}, abstract = {A software product line is a complex system the aim of which is to provide a platform dedicated to large reuse. It necessitates a great investment. Thus, its ability to cope with customers{\textquoteright} ever-changing requirements is among its key success factors. Great effort has been made to deal with the software product line evolution. In our previous works, we carried out a classification of these works to provide an overview of the used techniques. We also identified the following key challenges of software product lines evolution: the ability to predict future changes, the ability to define the impact of a change easily and the improvement in understanding the change. We have already tackled the second and the third challenges. The objective of this paper is to deal with the first challenge. We use the cladistics classification which was used in biology to understand the evolution of organisms sharing the same ancestor and their process of descent with the aim of predicting their future changes.
By analogy, we consider a population of applications for media management on mobile devices derived from the same platform and we use cladistics to construct their evolutionary tree. We conducted an analysis to show how to identify the evolution trends of the case study products and to predict future changes. {\textcopyright} 2015 by the authors.}, doi = {10.3390/info6030550}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84943561816\&doi=10.3390\%2finfo6030550\&partnerID=40\&md5=892c17009c76fbae807d486836450566}, author = {Benlarabi, A. and Khtira, A. and El Asri, B.} } @article {Slimani2015411, title = {Application of game theory and neural network to study the behavioral probabilities in supply chain}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {82}, number = {3}, year = {2015}, note = {cited By 3}, pages = {411-416}, abstract = {As a review of the game theoretic approach to optimizing logistic costs with the objective of modeling interactions among players in a basic supply chain, this paper focuses on a single channel, two-echelon supply chain with a retailer and his supplier of a single product, where careful attention is given to information sharing in general and demand forecasting in particular. Indeed, in the industrial world, firms cannot risk waiting for the actual demand to occur so that they can react and determine the quantities to purchase, produce or deliver. Demand forecasts are important and necessary to any member of the supply chain as they give them the advantage of planning and anticipating future needs. However, demand forecasting is one of those crucial decisions where an error can cost too much. This is why we choose to implement the artificial neural network as a forecasting technique. Obviously, the closest actor to the market, i.e., the retailer, has a better view of demand levels than the supplier, so sharing demand information with the other actors has an impact on the performance of the whole supply chain, but this is not necessarily the case since the retailer can choose to withhold this information. This is why we focus in this investigation on the demand{\textquoteright}s prediction when information is not shared, using the artificial intelligence of neural networks. {\textcopyright} 2005 - 2015 JATIT \& LLS. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84952779428\&partnerID=40\&md5=12ffaf47b965757c2f2c6cf341401aac}, author = {Slimani, I.a and El Farissi, I.b and Achchab, S.c} } @conference {Aoun2015196, title = {Application of multi-agent Markov decision processes to gate assignment problem}, booktitle = {Colloquium in Information Science and Technology, CIST}, volume = {2015-January}, number = {January}, year = {2015}, note = {cited By 0}, pages = {196-201}, abstract = {The Gate Assignment Problem (GAP) is an important subject of airport management to ensure smooth traffic operations. However, the flight schedule may undergo some stochastic events such as delays that usually occur and have to be considered in the planning. Our approach considers the representation of gates as collaborative agents trying to complete a set of flight assignment tasks as given by a centralized controller. That will allow giving a new model for the GAP based on Multi Agent Markov Decision Processes (MMDP). The aim of this work is to give controllers at the airport a robust a priori solution instead of taking the risk of online schedule modifications to handle uncertainty.
The solution of this problem will be a set of optimal decisions to be taken in every case of traffic disturbance. {\textcopyright} 2014 IEEE.}, doi = {10.1109/CIST.2014.7016618}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84938059541\&doi=10.1109\%2fCIST.2014.7016618\&partnerID=40\&md5=3c8489dfc2f96904a052d1ca21232020}, author = {Aoun, O. and El Afia, A.} } @conference { ISI:000383221800027, title = {Big Data: Measuring How Information Technology Can Improve the Economic Growth and Better Life}, booktitle = {PROCEEDINGS OF THE 2015 5TH WORLD CONGRESS ON INFORMATION AND COMMUNICATION TECHNOLOGIES (WICT)}, year = {2015}, note = {5th World Congress on Information and Communication Technologies (WICT), Marrakesh, MOROCCO, DEC 14-16, 2015}, pages = {152-159}, publisher = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; lab Res Grp Intelligent Machines; MIR Labs}, organization = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; lab Res Grp Intelligent Machines; MIR Labs}, abstract = {Today, data are the equivalent of oil or gold, but succeeding with big data requires more than just data. Data-based value creation requires the identification of a framework from which predictions can be deduced and decisions can be made. Many indices have been developed to measure the state of development of countries, whether in terms of economic growth, ICT penetration or human development. The purpose of the measuring was to assess policy implications and economic growth around the globe as well as share best practices. In this paper we compare the most widely used measurement tools for economic competitiveness, network readiness, human development and world press freedom. Those four domains are judged to be the pillars of human well-being and a good life. In the second part of this paper, we examine the hypothesis that ICT penetration has positive effects on economic growth, well-being and a better life. We use the system Generalized Method of Moments for dynamic panel data analysis to extract the causal link between ICT penetration and growth.}, isbn = {978-1-4673-8712-5}, author = {Nouinou, Soumaya and Razafimampianina, Rindra M. and Regragui, Boubker and Doukkali, Abdelaziz S.}, editor = {Abrahim, A and Alimi, AM and Haqiq, A and Karray, H and Mousannif, H and BenHalima, M and Choo, YH and Ma, K} } @conference { ISI:000373736100043, title = {Building rich user profile based on intentional perspective}, booktitle = {INTERNATIONAL CONFERENCE ON ADVANCED WIRELESS INFORMATION AND COMMUNICATION TECHNOLOGIES (AWICT 2015)}, series = {Procedia Computer Science}, volume = {73}, year = {2015}, note = {International Conference on Advanced Wireless Information and Communication Technologies (AWICT), Natl Sch Engineers Sousse, TUNISIA, OCT 05-07, 2015}, pages = {342-349}, abstract = {Internet technologies evolution, from Web 1.0 to web 2.0 and web 3.0, has led us towards the definition of new requirements to be considered in the design and development of new application. It is important to note that depending solely on the request to satisfy user need is not effective. Indeed, the emergence of many researches related to the study of user behaviour has enhanced the retrieval information effectiveness. Typically, the context and the user profile are the main elements to characterize the user.
Hence, we aim through this contribution to build a rich profile and to provide him suitable services. (C) 2015 The Authors. Published by Elsevier B.V.}, issn = {1877-0509}, doi = {10.1016/j.procs.2015.12.002}, author = {Alaoui, Sara and El Idrissi, Younes El Bouzekri and Ajhoun, Rachida}, editor = {Boubiche, DE and Hidoussi, F and Cruz, HT} } @conference {Alaoui2015342, title = {Building Rich User Profile Based on Intentional Perspective}, booktitle = {Procedia Computer Science}, volume = {73}, year = {2015}, note = {cited By 0}, pages = {342-349}, abstract = {Internet technologies evolution, from Web 1.0 to web 2.0 and web 3.0, has led us towards the definition of new requirements to be considered in the design and development of new application. It is important to note that depending solely on the request to satisfy user need is not effective. Indeed, the emergence of many researches related to the study of user behaviour has enhanced the retrieval information effectiveness. Typically, the context and the user profile are the main elements to characterize the user. Hence, we aim through this contribution to build a rich profile and to provide him suitable services. {\textcopyright} 2015 The Authors.}, doi = {10.1016/j.procs.2015.12.002}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962720975\&doi=10.1016\%2fj.procs.2015.12.002\&partnerID=40\&md5=503ca037df66420fe4a0f31a57e41f7e}, author = {Alaoui, S.a and Idrissi, Y.E.B.E.b and Ajhoun, R.a} } @conference {ElAlami2015, title = {Cloud computing \& the organizational performance: Different approach of assessment}, booktitle = {Proceedings of 2015 International Conference on Cloud Computing Technologies and Applications, CloudTech 2015}, year = {2015}, note = {cited By 0}, abstract = {This paper introduces a nutshell of the cloud computing with its different deployment and delivery models. Additionally, it presents an assessment of the impact of the cloud computing on the organizational performance from many points of view. {\textcopyright} 2015 IEEE.}, doi = {10.1109/CloudTech.2015.7337007}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962861421\&doi=10.1109\%2fCloudTech.2015.7337007\&partnerID=40\&md5=1a58632197fd4c3d98f19023421b1825}, author = {El Alami, A. and Sadok, H. and Elhaoud, N.} } @conference { ISI:000380407100046, title = {Cloud computing \& the organizational performance Different approach of assessment}, booktitle = {2015 INTERNATIONAL CONFERENCE ON CLOUD TECHNOLOGIES AND APPLICATIONS (CLOUDTECH 15)}, year = {2015}, note = {International Conference on Cloud Computing Technologies and Applications (CloudTech), Marrakech, MOROCCO, JUN 02-04, 2015}, pages = {318-322}, abstract = {this paper introduces a nutshell of the cloud computing with its different deployment and delivery models. 
Additionally, it presents an assessment of the impact of the cloud computing on the organizational performance from many points of view.}, isbn = {978-1-4673-8149-9}, author = {El Alami, Abdelhamid and Sadok, Hicham and Elhaoud, Naima} } @conference {Abderrazzak2015, title = {Cloud SaaS using MDA approach on a multiview models generate a SaaS from a colored Petri Net using view PNML}, booktitle = {Proceedings of 2015 International Conference on Cloud Computing Technologies and Applications, CloudTech 2015}, year = {2015}, note = {cited By 0}, abstract = {In this paper, we continue improving our MDA Framework whose aim is to generate a SaaS (Software as a Service) inside a Cloud Environment from a multiview model using a Colored Petri Nets. This framework is composed of 4 modules, we will focus on the 3rd one which design an extended PNML based on a multiview system who is supporting views. {\textcopyright} 2015 IEEE.}, doi = {10.1109/CloudTech.2015.7336977}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962783554\&doi=10.1109\%2fCloudTech.2015.7336977\&partnerID=40\&md5=b2b5aff87b415f297fb216323a47bef8}, author = {Abderrazzak, Z. and Ahmed, E.} } @conference { ISI:000380407100016, title = {Cloud SaaS Using MDA Approach on a Multiview Models Generate a SaaS from a Colored Petri Net using View PNML}, booktitle = {2015 INTERNATIONAL CONFERENCE ON CLOUD TECHNOLOGIES AND APPLICATIONS (CLOUDTECH 15)}, year = {2015}, note = {International Conference on Cloud Computing Technologies and Applications (CloudTech), Marrakech, MOROCCO, JUN 02-04, 2015}, pages = {143-147}, abstract = {In this paper, we continue improving our MDA Framework whose aim is to generate a SaaS (Software as a Service) inside a Cloud Environment from a multiview model using a Colored Petri Nets. This framework is composed of 4 modules, we will focus on the 3rd one which design an extended PNML based on a multiview system who is supporting views.}, isbn = {978-1-4673-8149-9}, author = {Abderrazzak, Zeddari and Ahmed, Ettalbi} } @conference { ISI:000380403000032, title = {Co-evolution Analysis for Software Product Lines}, booktitle = {ENASE 2015 - PROCEEDINGS OF THE 10TH INTERNATIONAL CONFERENCE ON EVALUATION OF NOVEL APPROACHES TO SOFTWARE ENGINEERING}, year = {2015}, note = {10th International Conference on Evaluation of Novel Approaches to Software Engineering, Barcelona, SPAIN, APR 29-30, 2015}, pages = {263-269}, publisher = {Inst Syst \& Technol Information, Control \& Commun; Tech Council Software Engn; IEEE Comp Soc}, organization = {Inst Syst \& Technol Information, Control \& Commun; Tech Council Software Engn; IEEE Comp Soc}, abstract = {The purpose of our approach is to study the co-evolution of the platform and the products of software product lines. Because the platform must be able to derive all the family products, products are not allowed to evolve independently from the plateform, thus the propagation of the products changes must be managed efficiently. Instead of focusing on the change impact analysis we propose an approach to compare the evolution histories of the products and the platform illustrated through evolutionary trees built using the biological technique cladistics. This comparison yields important results concerning the change propagation. 
In this paper, we introduce the use of cladistics for software product lines to build evolutionary trees for platform and products, then we elaborate a mathematical analysis to compare these trees, afterwards we validate this work through a case study (mobile media software product lines). We also provide the design of an automated tool.}, isbn = {978-9-8975-8143-4}, author = {Benlarabi, Anissa and Khtira, Amal and El Asri, Bouchra}, editor = {Filipe, J and Maciaszek, L} } @conference {Ennasar2015, title = {A Compact modified S-Shaped RFID tag antenna for metallic applications}, booktitle = {Mediterranean Microwave Symposium}, volume = {2016-January}, year = {2015}, note = {cited By 0}, abstract = {In this paper, a compact modified S-shaped RFID tag antenna for metallic applications is presented to handle the US band. To miniaturize the antenna to a size of 49{\texttimes}21{\texttimes}1.58 mm3 and to provide a good conjugate matching between the S-shaped antenna and the chip, the technique of adding asymmetrical triangular stubs on both sides of the tag chip, which are electrically connected through vias to the ground plane, was applied. The simulated results show that this antenna has good performance when attached onto metallic objects. {\textcopyright} 2015 IEEE.}, doi = {10.1109/MMS.2015.7375407}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962739902\&doi=10.1109\%2fMMS.2015.7375407\&partnerID=40\&md5=378c1168863a2c2db9c3bed43072607d}, author = {Ennasar, M.A.a and Aznabet, I.b and El Mrabet, O.b and Essaaidi, M.} } @conference { ISI:000380521600039, title = {A Compact Modified S-Shaped RFID Tag Antenna for Metallic Applications}, booktitle = {2015 IEEE 15th Mediterranean Microwave Symposium (MMS)}, year = {2015}, note = {2015 IEEE 15th Mediterranean Microwave Symposium (MMS), Lecce, ITALY, NOV 30-DEC 02, 2015}, abstract = {In this paper, a compact modified S-shaped RFID tag antenna for metallic applications is presented to handle the US band. To miniaturize the antenna to a size of 49x21 x 1.58 mm(3) and to provide a good conjugate matching between the S-shaped antenna and the chip, the technique of adding asymmetrical triangular stubs on both sides of the tag chip, which are electrically connected through vias to the ground plane, was applied. The simulated results show that this antenna has good performance when attached onto metallic objects.}, isbn = {978-1-4673-7602-0}, author = {Ennasar, M. A. and Aznabet, I. and El Mrabet, O. and Essaaidi, M.} } @conference { ISI:000380509200004, title = {Construction of Cyclic One-Step Majority-Logic Decodable Codes using Genetic Algorithms}, booktitle = {2015 INTERNATIONAL CONFERENCE ON WIRELESS NETWORKS AND MOBILE COMMUNICATIONS (WINCOM)}, year = {2015}, note = {International conference on wireless networks and mobile communications, Marrakech, MOROCCO, OCT 20-23, 2015}, pages = {17-22}, abstract = {In {[}6], a construction of cyclic one-step majority-logic decodable codes based on idempotent polynomials is given. However, the search for the feasible Parity-Check Idempotent runs through all possible combinations of cyclotomic cosets modulo n, satisfying some algebraic constraints; consequently, increasing the code length may result in a very large dimension space search, and the search for the solution becomes more difficult. In this paper, we propose a Genetic Algorithm that aims to construct new moderate and high length Binary Cyclic OSMLD codes, considered as LDPC codes, with high correction capacities.
Our construction is very efficient and provides codes with high lengths and high rates.}, isbn = {978-1-4673-8224-3}, author = {Yatribi, Anouar and Ayoub, Fouad and Belkasmi, Mostafa} } @conference {Yatribi2015, title = {Construction of cyclic one-step majority-logic decodable codes using genetic algorithms}, booktitle = {International Conference on Wireless Networks and Mobile Communications, WINCOM 2015}, year = {2015}, note = {cited By 0}, abstract = {In [6], a construction of cyclic one-step majority-logic decodable codes based on idempotent polynomials is given. However, the search for the feasible Parity-Check Idempotent runs through all possible combinations of cyclotomic cosets modulo n, satisfying some algebraic constraints; consequently, increasing the code length may result in a very large dimension space search, and the search for the solution becomes more difficult. In this paper, we propose a Genetic Algorithm that aims to construct new moderate and high length Binary Cyclic OSMLD codes, considered as LDPC codes, with high correction capacities. Our construction is very efficient and provides codes with high lengths and high rates. {\textcopyright} 2015 IEEE.}, doi = {10.1109/WINCOM.2015.7381301}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84965037029\&doi=10.1109\%2fWINCOM.2015.7381301\&partnerID=40\&md5=81a1ec4d5126eb8f485411ea3f2f1a37}, author = {Yatribi, A.a and Ayoub, F.b and Belkasmi, M.a} } @conference {Chaker2015, title = {CSR dynamics under peer pressure and green confusion: A multi-agent simulation approach}, booktitle = {2015 10th International Conference on Intelligent Systems: Theories and Applications, SITA 2015}, year = {2015}, note = {cited By 0}, abstract = {In addressing the limitations of solving the Prisoner{\textquoteright}s Dilemma problem describing the firm-end consumer Corporate Social Responsibility (CSR) dynamics from a purely rational perspective, we propose to introduce the variables of emotions and peer pressure to capture the irrational aspect of human decision making. We refer to the Theory of Planned Behavior to construct a decision making model that describes the game, then we use multi-agent simulation to run the model under various market conditions. Results show that socially responsible firms could achieve above-average profits when they are able to quickly recover their CSR investment and when they operate in a market where a large portion of consumers hold initial positive attitudes towards CSR. In addition, our results indicate that above-average profits can be achieved only in the absence of consumer green confusion. Managerial implications on trust and customer loyalty are discussed based on the above findings.
{\textcopyright} 2015 IEEE.}, doi = {10.1109/SITA.2015.7358404}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962429373\&doi=10.1109\%2fSITA.2015.7358404\&partnerID=40\&md5=70f416ef7cc23e161ccc73120f9c3705}, author = {Chaker, F.a and El Manouar, A.a and Aaminou, M.W.b and Aboulaich, R.b} } @conference { ISI:000380409500030, title = {CSR Dynamics under Peer Pressure and Green Confusion: a Multi-Agent Simulation Approach}, booktitle = {2015 10TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, year = {2015}, note = {10th International Conference on Intelligent Systems - Theories and Applications (SITA), ENSIAS, Rabat, MOROCCO, OCT 20-21, 2015}, publisher = {IEEE}, organization = {IEEE}, abstract = {In addressing the limitations of solving the Prisoner{\textquoteright}s Dilemma problem describing the firm-end consumer Corporate Social Responsibility (CSR) dynamics from a purely rational perspective, we propose to introduce the variables of emotions and peer pressure to capture the irrational aspect of human decision making. We refer to the Theory of Planned Behavior to construct a decision making model that describes the game, then we use multi-agent simulation to run the model under various market conditions. Results show that socially responsible firms could achieve above-average profits when they are able to quickly recover their CSR investment and when they operate in a market where a large portion of consumers hold initial positive attitudes towards CSR. In addition, our results indicate that above-average profits can be achieved only in the absence of consumer green confusion. Managerial implications on trust and customer loyalty are discussed based on the above findings.}, isbn = {978-1-5090-0220-7}, author = {Chaker, Fadwa and El Manouar, Abdellah and Wail Aaminou, Mohamed and Aboulaich, Rajae} } @conference {Ayache2015771, title = {CurlX: A middleware to enforce access control policies within a cloud environment}, booktitle = {2015 IEEE Conference on Communications and NetworkSecurity, CNS 2015}, year = {2015}, note = {cited By 3}, pages = {771-772}, abstract = {Today cloud security and privacy concerns pose significant challenges. One particular challenge consists in how to take into consideration the user{\textquoteright}s security policies while accessing the outsourced data. In fact, for a given application, we may have a set of rules, as high level security policy, which needs to hold prior to any query execution. Therefore, the main problem that we tackle in this ongoing project is how to enforce this high level security policy in the cloud storage layer without conflicts. To address this challenges, we propose a middleware denoted curlX which consists of two main processes: the security policy enforcement process and the verification process. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/CNS.2015.7346928}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84966440949\&doi=10.1109\%2fCNS.2015.7346928\&partnerID=40\&md5=bc69af0f99d640d39aa014a6a06475e9}, author = {Ayache, M.a and Erradi, M.a and Freisleben, B.b} } @conference { ISI:000380401800126, title = {curlX: a MiddleWare to Enforce Access Control Policies within a Cloud Environment}, booktitle = {2015 IEEE CONFERENCE ON COMMUNICATIONS AND NETWORK SECURITY (CNS)}, year = {2015}, note = {IEEE Conference Communications and Network Security CNS, Florence, ITALY, SEP 28-30, 2015}, pages = {771-772}, abstract = {Today cloud security and privacy concerns pose significant challenges. One particular challenge consists in how to take into consideration the user{\textquoteright}s security policies while accessing the outsourced data. In fact, for a given application, we may have a set of rules, as high level security policy, which needs to hold prior to any query execution. Therefore, the main problem that we tackle in this ongoing project is how to enforce this high level security policy in the cloud storage layer without conflicts. To address this challenges, we propose a middleware denoted curlX which consists of two main processes: the security policy enforcement process and the verification process.}, isbn = {978-1-4673-7876-5}, author = {Ayache, Meryeme and Erradi, Mohammed and Freisleben, Bernd} } @article {Naghar20151813, title = {Design of compact multiband bandpass filter with suppression of second harmonic spurious by coupling gap reduction}, journal = {Journal of Electromagnetic Waves and Applications}, volume = {29}, number = {14}, year = {2015}, note = {cited By 1}, pages = {1813-1828}, abstract = {In this paper, we describe a method to implement compact multiband bandpass filters with suppression of second harmonic frequency. This filter design approach is based on decreasing the coupling gap between adjacent resonators of a parallel-coupled-line bandpass filter in order to achieve both the desired multiband frequency response and the spurious suppression. We present the theoretical analysis of the proposed structure that consists of modeling the frequency dependence of the even- and odd-mode characteristic impedances as well as due to the different phase velocities of the parallel-coupled microstrip lines. As an example, a compact tri-band parallel-coupled-line bandpass filter with suppression of second harmonic frequency was implemented operating at 1.9/3.2/4.6 GHz to cover PCS1900, WiMAX, and C-band applications. A three-pole Chebyshev parallel-coupled microstrip bandpass filter was designed at a center frequency of 3.2 GHz and used as the basis to validate the gapping effect on the filter response which also achieves a narrower bandwidth for the second harmonic. Finally, the filter performance with minimized coupling gap is compared to a filter enhanced by the insertion of apertures in the ground plane. Generally speaking, good agreement was accomplished between simulated, calculated, and measured results. 
{\textcopyright} 2015 Taylor \& Francis.}, doi = {10.1080/09205071.2015.1043029}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84940714115\&doi=10.1080\%2f09205071.2015.1043029\&partnerID=40\&md5=b402327d69e8f5f98418025a5c5cd607}, author = {Naghar, A.a and Aghzout, O.a and Alejos, A.V.b and Sanchez, M.G.b and Essaaidi, M.c} } @article { ISI:000360305800001, title = {Design of compact multiband bandpass filter with suppression of second harmonic spurious by coupling gap reduction}, journal = {JOURNAL OF ELECTROMAGNETIC WAVES AND APPLICATIONS}, volume = {29}, number = {14}, year = {2015}, month = {SEP 22}, pages = {1813-1828}, abstract = {In this paper, we describe a method to implement compact multiband bandpass filters with suppression of second harmonic frequency. This filter design approach is based on decreasing the coupling gap between adjacent resonators of a parallel-coupled-line bandpass filter in order to achieve both the desired multiband frequency response and the spurious suppression. We present the theoretical analysis of the proposed structure that consists of modeling the frequency dependence of the even- and odd-mode characteristic impedances as well as due to the different phase velocities of the parallel-coupled microstrip lines. As an example, a compact tri-band parallel-coupled-line bandpass filter with suppression of second harmonic frequency was implemented operating at 1.9/3.2/4.6GHz to cover PCS1900, WiMAX, and C-band applications. A three-pole Chebyshev parallel-coupled microstrip bandpass filter was designed at a center frequency of 3.2GHz and used as the basis to validate the gapping effect on the filter response which also achieves a narrower bandwidth for the second harmonic. Finally, the filter performance with minimized coupling gap is compared to a filter enhanced by the insertion of apertures in the ground plane. Generally speaking, good agreement was accomplished between simulated, calculated, and measured results.}, issn = {0920-5071}, doi = {10.1080/09205071.2015.1043029}, author = {Naghar, Azzedin and Aghzout, Otman and Vazquez Alejos, Ana and Garcia Sanchez, Manuel and Essaaidi, Mohammed} } @article { ISI:000366391200018, title = {Design of compact wideband multi-band and ultrawideband band pass filters based on coupled half wave resonators with reduced coupling gap}, journal = {IET MICROWAVES ANTENNAS \& PROPAGATION}, volume = {9}, number = {15}, year = {2015}, month = {DEC 10}, pages = {1786-1792}, abstract = {In this paper we propose a technique to design compact multi-band and UWB bandpass filters based on coupled half wave resonators. The proposed design consists of the modification of a conventional parallel coupled Chebyshev bandpass filter structure by setting a very small or null coupling gap between the resonators of the center sections jointly with a very small spacing between resonators of the extremity sections. This spacing determines the performances of selected frequency bands. An ultrawideband response is accomplished by applying null spacing between all the adjacent resonators. We analysed the effect of the separation distance between the coupled lines on both the fractional bandwidth and group velocity of the filter response. The effect of the order assumed for the initial Chebyshev filter was also discussed. 
As an illustration of the proposed technique, we designed and measured a dual band and a tri-band filter for the frequencies covering the WiMAX/WLAN/X system bands demonstrating an excellent performance, with a fractional bandwidth covering the 40\% and 100\% of the FCC bandwidth respectively. The proposed technique alleviates the fabrication accuracy requirements. The designs show an optimal improvement in terms of group velocity flatness.}, issn = {1751-8725}, doi = {10.1049/iet-map.2015.0188}, author = {Naghar, Azzedin and Aghzout, Otman and Vazquez Alejos, Ana and Garcia Sanchez, Manuel and Essaaidi, Mohamed} } @article {11164386420151201, title = {Design of compact wideband multi-band and ultrawideband band pass filters based on coupled half wave resonators with reduced coupling gap.}, journal = {IET Microwaves, Antennas \& Propagation}, volume = {9}, number = {15}, year = {2015}, pages = {1786 - 1792}, abstract = {In this paper we propose a technique to design compact multi-band and UWB bandpass filters based on coupled half wave resonators. The proposed design consists of the modification of a conventional parallel coupled Chebyshev bandpass filter structure by setting a very small or null coupling gap between the resonators of the center sections jointly with a very small spacing between resonators of the extremity sections. This spacing determines the performances of selected frequency bands. An ultrawideband response is accomplished by applying null spacing between all the adjacent resonators. We analysed the effect of the separation distance between the coupled lines on both the fractional bandwidth and group velocity of the filter response. The effect of the order assumed for the initial Chebyshev filter was also discussed. As an illustration of the proposed technique, we designed and measured a dual band and a tri-band filter for the frequencies covering the WiMAX/WLAN/X system bands dem}, keywords = {Bandpass filters {\textendash} Research, Chebyshev systems {\textendash} Research, Electric resonators {\textendash} Research, Ultra-wideband antennas {\textendash} Research, Wireless LANs {\textendash} Research}, issn = {17518725}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=111643864\&site=ehost-live}, author = {Naghar, Azzedin and Aghzout, Otman and Vazquez Alejos, Ana and Garcia Sanchez, Manuel and Essaaidi, Mohamed} } @article {Naghar20151786, title = {Design of compact wideband multi-band and ultrawideband band pass filters based on coupled half wave resonators with reduced coupling gap}, journal = {IET Microwaves, Antennas and Propagation}, volume = {9}, number = {15}, year = {2015}, note = {cited By 0}, pages = {1786-1792}, abstract = {In this paper we propose a technique to design compact multi-band and UWB bandpass filters based on coupled half wave resonators. The proposed design consists of the modification of a conventional parallel coupled Chebyshev bandpass filter structure by setting a very small or null coupling gap between the resonators of the center sections jointly with a very small spacing between resonators of the extremity sections. This spacing determines the performances of selected frequency bands. An ultrawideband response is accomplished by applying null spacing between all the adjacent resonators. We analysed the effect of the separation distance between the coupled lines on both the fractional bandwidth and group velocity of the filter response. 
The effect of the order assumed for the initial Chebyshev filter was also discussed. As an illustration of the proposed technique, we designed and measured a dual band and a tri-band filter for the frequencies covering the WiMAX/WLAN/X system bands demonstrating an excellent performance, with a fractional bandwidth covering the 40\% and 100\% of the FCC bandwidth respectively. The proposed technique alleviates the fabrication accuracy requirements. The designs show an optimal improvement in terms of group velocity flatness.}, doi = {10.1049/iet-map.2015.0188}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84949946910\&doi=10.1049\%2fiet-map.2015.0188\&partnerID=40\&md5=ae56641eed11a3a3b77fe146406fba57}, author = {Naghar, A.a b and Aghzout, O.c and Alejos, A.V.a and Sanchez, M.G.a and Essaaidi, M.d} } @conference { ISI:000380403000031, title = {Detecting Feature Duplication in Natural Language Specifications when Evolving Software Product Lines}, booktitle = {ENASE 2015 - PROCEEDINGS OF THE 10TH INTERNATIONAL CONFERENCE ON EVALUATION OF NOVEL APPROACHES TO SOFTWARE ENGINEERING}, year = {2015}, note = {10th International Conference on Evaluation of Novel Approaches to Software Engineering, Barcelona, SPAIN, APR 29-30, 2015}, pages = {257-262}, publisher = {Inst Syst \& Technol Information, Control \& Commun; Tech Council Software Engn; IEEE Comp Soc}, organization = {Inst Syst \& Technol Information, Control \& Commun; Tech Council Software Engn; IEEE Comp Soc}, abstract = {Software product lines are dynamic systems that need to evolve continuously to meet new customer requirements. This evolution impacts both the core platform of the product line and its derived products. For several reasons, the most common way to express requirements by customers is natural language. However, the experience has shown that this communication channel does not give the possibility to detect system defects such as inconsistency and duplication. The objective of this paper is to propose a method to transform textual requirements into the XML format used by some Feature-oriented software development tools, in order to facilitate the detection of features duplication.}, isbn = {978-9-8975-8143-4}, author = {Khtira, Amal and Benlarabi, Anissa and El Asri, Bouchra}, editor = {Filipe, J and Maciaszek, L} } @article {Allouch201531, title = {Distributed CloudIMS: Future-Generation Network with Internet of Thing Based on Distributed Cloud Computing}, journal = {Advances in Intelligent Systems and Computing}, volume = {308 AISC}, number = {VOLUME 1}, year = {2015}, note = {cited By 0}, pages = {31-45}, abstract = {The next-generation network, cloud computing, and Internet of thing are a challenging and promising paradigm shift in IT world technology. Diminishing the cost for users for provisioning anywhere connecting at anytime from anywhere network, CloudIMS consists of interconnecting heterogeneous access technology and to respond to a major challenge for serving the increase in demand and scalable network access to share pool of configurable resource of enabling a convenient cloud computing. This paper mainly focused on common approach to integrate the IP multimedia subsystem (IMS), the Internet of thing, and cloud computing under the name of CloudIMS architecture which makes multimedia service easy to deploy on a cloud platform. We present the state of art of the different elements of CloudIMS. 
Moreover, we examine the layers designed for CloudIMS based on next-generation network access for mobile communication devices between different types of technologies (3GPP and non-3GPP), such as global system for mobile communication (GSM), wireless network, worldwide interoperability for microwave access (WiMAX), Universal Mobile Telecommunications System (UMTS) and long-term evolution (LTE). Finally, we present an architecture of CloudIMS according to our point of view, followed by a discussion of a use case for the future networks. {\textcopyright} Springer India 2015.}, doi = {10.1007/978-81-322-2012-1_5}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906933110\&doi=10.1007\%2f978-81-322-2012-1_5\&partnerID=40\&md5=d708396f65127dc5fbaa3123c7e2b9aa}, author = {Allouch, H. and Belkasmi, M.} } @article {Khtira2015592, title = {Duplication detection when evolving feature models of software product lines}, journal = {Information (Switzerland)}, volume = {6}, number = {4}, year = {2015}, note = {cited By 1}, pages = {592-612}, abstract = {After the derivation of specific applications from a software product line, the applications keep evolving with respect to new customer{\textquoteright}s requirements. In general, evolutions in most industrial projects are expressed using natural language, because it is the easiest and the most flexible way for customers to express their needs. However, the use of this means of communication has shown its limits in detecting defects, such as inconsistency and duplication, when evolving the existing models of the software product line. The aim of this paper is to transform the natural language specifications of new evolutions into a more formal representation using natural language processing. Then, an algorithm is proposed to automatically detect duplication between these specifications and the existing product line feature models. In order to instantiate the proposed solution, a tool is developed to automatize the two operations. {\textcopyright} 2015 by the authors.}, doi = {10.3390/info6040592}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84952314845\&doi=10.3390\%2finfo6040592\&partnerID=40\&md5=0661de379421d5877cc91927d0c41731}, author = {Khtira, A. and Benlarabi, A. and El Asri, B.} } @conference {Baya2015439, title = {Dynamic large scale product lines through modularization approach}, booktitle = {ICEIS 2015 - 17th International Conference on Enterprise Information Systems, Proceedings}, volume = {2}, year = {2015}, note = {cited By 0}, pages = {439-444}, abstract = {Software product line (SPL) now faces major scalability problems because of technical advances of the past decades. However, using traditional approaches of software engineering to deal with this increasing scalability is not feasible. Therefore, new techniques must be provided in order to resolve scalability issues. For such a purpose, we propose through this paper a modularization approach according to two dimensions: In the first dimension we use Island algorithm in order to obtain structural modules. In the second dimension we decompose obtained modules according to features binding time so as to obtain dynamic submodules. Copyright {\textcopyright} 2015 SCITEPRESS - Science and Technology Publications.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84939529035\&partnerID=40\&md5=1e1405f89ebaaff933ee12e07dcfc995}, author = {Baya, A. and El Asri, B. and Dehmouch, I. 
and Mcharfi, Z.} } @article {Aouinatou20158, title = {A dynamic study with side channel against an identification based encryption}, journal = {International Journal of Communication Networks and Information Security}, volume = {7}, number = {1}, year = {2015}, note = {cited By 1}, pages = {8-19}, abstract = {Recently, the side channel keeps the attention of researchers in theory of pairing, since, several studies have been done in this subject and all they have the aim in order to attack the cryptosystems of Identification Based Encryption (IBE) which are integrated into Smart Cards (more than 80\% of those cryptosystems are based on a pairing). The great success and the remarkable development of the cryptography IBE in the recent years and the direct connection of this success to the ability of resistance against any kind of attack, especially the DPA (Differential Power Analysis) and DFA (Differential Fault Analysis) attacks, leave us to browse saying all the studies of the DPA and DFA attacks applied to a pairing and we have observed that they have no great effect to attack the cryptosystems of IBE. That is what we will see in this paper. In this work we will illuminate the effect of the DPA attack on a cryptosystems of IBE and we would see on what level we can arrive. Thus in the case where this attack can influence on those cryptosystems, we can present an appropriate counter-measures to resist such attack. In the other part, we will also propose a convenient counter-measure to defend against the DFA attack when the embedding degree is even.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929307912\&partnerID=40\&md5=01269c358b3d93b26c9cc87bd33fc777}, author = {Aouinatou, R.a and Belkasmi, M.b and Askali, M.b} } @conference { ISI:000360508700103, title = {Early Warning Signs Detection in Competitive Intelligence}, booktitle = {INNOVATION VISION 2020: FROM REGIONAL DEVELOPMENT SUSTAINABILITY TO GLOBAL ECONOMIC GROWTH, VOL I-VI}, year = {2015}, note = {25th International-Business-Information-Management-Association Conference, Amsterdam, NETHERLANDS, MAY 07-08, 2015}, pages = {1014-1024}, publisher = {Int Business Informat Management Assoc}, organization = {Int Business Informat Management Assoc}, abstract = {In a world of excessive competitiveness in the business environment, the competitive intelligence activities are widely used to monitor the competitors. Many techniques are used like opinion mining, event detection and foresight studies. Foresight allows studying the future and tries to predict and anticipate future actions. We can find diverse concepts and methodologies with numerous challenging research topics: trends, weak signals, wild cards... This paper will focus on weak signal detection in which we try to find the early warning signs in a performed quantitative and automatic way. 
It will present a literature review regarding the weak signals analysis terminology along with the processes to detect these signals, techniques, methods and approaches used.}, isbn = {978-0-9860419-4-5}, author = {El Akrouchi, Manal and Benbrahim, Houda and Kassou, Ismail}, editor = {Soliman, KS} } @conference {Allouch2015129, title = {Efficient vertical handover scheme on IMS network and cost analysis}, booktitle = {Colloquium in Information Science and Technology, CIST}, volume = {2015-January}, number = {January}, year = {2015}, note = {cited By 0}, pages = {129-134}, abstract = {According to the various mobility, the resource allocation, interference, and the characteristics of heterogeneity and ubiquity of access network based on the next generation network, namely, the network IMS (IP Multimedia subsystem), have imposed several challenges. In regard to mobility management, handover management, and Quality of Service (QoS) provisioning, this paper proposes an adaptive environment based on IMS, between the heterogeneous access technologies, like WiMAX, WLAN, UMTS and LTE/LTE Advanced interfaces. Nevertheless, especially, the problem of handover decision, as resolving it influence, mainly on the handover performance; ANN(Artificial Neural Network) is proposed to solve the problem of decision and selection of best network access based IMS. The objective of proposed approach algorithms, is to maximize handover performance and reliability, with a new generic mobility management architecture, for system resource utilization by reducing handover latency and achieving load balance between different heterogeneous cells and networks. {\textcopyright} 2014 IEEE.}, doi = {10.1109/CIST.2014.7016606}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84938073075\&doi=10.1109\%2fCIST.2014.7016606\&partnerID=40\&md5=5744e559709c7503fddc8baba7ed4f05}, author = {Allouch, H. and Belkasmi, M.} } @article {10368063820150701, title = {E-Government Portals Maturity Models: A Best Practices{\textquoteright} Coverage Perspective.}, journal = {Journal of Software (1796217X)}, volume = {10}, number = {7}, year = {2015}, pages = {805 - 824}, abstract = {E-government is a field where oriented practice is considered crucial for its prosperity. Therefore, best practices are considered among the success factors of e-government portals. To this end, e-government maturity models can be used to provide guidance and guidelines to identify those best practices. After an extensive literature review, we have collected both; the e-government portals{\textquoteright} best practices and organized them according to their purposes in an e-Government Portals{\textquoteright} Best Practice Model (eGPBPM), and the set of 25 maturity models best practices in two separated previous published studies. The eGPBPM is composed of four best practice categories including: back-end, Web design, Web content and external. Moreover, each maturity model has several stages of maturity and each stage include a set of best practices used to rank the maturity of e-government portals. 
The goal of this paper is to identify the extent to which e-government maturity models are covering the best practices}, keywords = {best practices, Capability maturity model (Computer software), E-Government, e-government portal, Information \& communication technologies, Internet in public administration, maturity model, Web design, Web portals}, issn = {1796217X}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=103680638\&site=ehost-live}, author = {Fath-Allah, Abdoullah and Cheikhi, Laila and Al-Qutaish, Rafa E. and Idri, Ali} } @conference {Abdeladim2015135, title = {Elasticity and scalability centric quality model for the cloud}, booktitle = {Colloquium in Information Science and Technology, CIST}, volume = {2015-January}, number = {January}, year = {2015}, note = {cited By 0}, pages = {135-140}, abstract = {Cloud computing seems to be the most logical shift in terms of Information Technology after Internet, Social Networking. Despite the potential benefits that cloud computing offers, the model brings new issues, challenges, and needs in term of SLA formalization, Quality of Service (QoS) evaluation due to the heterogeneous resources and to the special features it implies, such as Elasticity and Scalability. In the scope of this paper we focus on the Elasticity and Scalability attributes to assess their impact on the QoS. The paper provides a multi-lenses overview that can help both cloud consumers and potential business application{\textquoteright}s owners to understand, analyze, and evaluate important aspects related to Scalability and Elasticity capabilities. We determine and analyze the key features of these characteristics and derive metrics that evaluate the cloud elasticity-centric capabilities. We present a specific quality model for those two characteristics derived from their sub-attributes. {\textcopyright} 2014 IEEE.}, doi = {10.1109/CIST.2014.7016607}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84938093287\&doi=10.1109\%2fCIST.2014.7016607\&partnerID=40\&md5=dca7af3f86c19692e6963b90e635720b}, author = {Abdeladim, A. and Baina, S. and Baina, K.} } @conference {Sarhani20151, title = {Electric load forecasting using hybrid machine learning approach incorporating feature selection}, booktitle = {CEUR Workshop Proceedings}, volume = {1580}, year = {2015}, note = {cited By 0}, pages = {1-7}, abstract = {Forecasting of future electricity demand is very important for the electric power industry. As influenced by various factors, it has been shown in several publications that machine learning methods are useful for electric load forecasting (ELF). On the one hand, we introduce in this paper the approach of support vector regression (SVR) for ELF. In particular, we use particle swarm optimization (PSO) algorithm to optimize SVR parameters. On the other hand, it is important to determine the irrelevant factors as a preprocessing step for ELF. Our contribution consists of investigating the importance of applying the feature selection approach for removing the irrelevant factors of electric load. The experimental results elucidate the feasibility of applying feature selection without decreasing the performance of the SVR-PSO model for ELF.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84977574358\&partnerID=40\&md5=405be8fb16c29c43f82f16549d26252b}, author = {Sarhani, M. 
and El Afia, A.} } @conference {Azougaghe2015140, title = {An electronic voting system based on homomorphic encryption and prime numbers}, booktitle = {Proceedings of the 2015 11th International Conference on Information Assurance and Security, IAS 2015}, year = {2015}, note = {cited By 1}, pages = {140-145}, abstract = {In this paper we present an electronic voting system based on homomorphic encryption to ensure privacy and confidentiality in voting. Our proposal offers all the advantages of multiplicatively homomorphic encryption cryptosystems. The proposed voting scheme is suitable for multi-candidate elections as well as for elections that allow neutral votes. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ISIAS.2015.7492759}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84979503097\&doi=10.1109\%2fISIAS.2015.7492759\&partnerID=40\&md5=a062b12e4631c6978b161d42deb41a74}, author = {Azougaghe, A. and Hedabou, M. and Belkasmi, M.} } @article {Jorio2015, title = {An energy-efficient clustering routing algorithm based on geographic position and residual energy for wireless sensor network}, journal = {Journal of Computer Networks and Communications}, volume = {2015}, year = {2015}, note = {cited By 4}, abstract = {Recently wireless sensor network (WSN) has become one of the most interesting networking technologies, since it can be deployed without communication infrastructures. A sensor network is composed of a large number of sensor nodes; these nodes are responsible for supervision of the physical phenomenon and transmission of the periodical results to the base station. Therefore, improving the energy efficiency and maximizing the networking lifetime are the major challenges in this kind of networks. To deal with this, a hierarchical clustering scheme, called Location-Energy Spectral Cluster Algorithm (LESCA), is proposed in this paper. LESCA determines automatically the number of clusters in a network. It is based on spectral classification and considers both the residual energy and some properties of nodes. In fact, our approach uses the K-ways algorithm and proposes new features of the network nodes such as average energy, distance to BS, and distance to clusters centers in order to determine the clusters and to elect the cluster{\textquoteright}s heads of a WSN. The simulation results show that if the clusters are not constructed in an optimal way and/or the number of the clusters is greater or less than the optimal number of clusters, the total consumed energy of the sensor network per round is increased exponentially. {\textcopyright} 2015 Ali Jorio et al.}, doi = {10.1155/2015/170138}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84924190502\&doi=10.1155\%2f2015\%2f170138\&partnerID=40\&md5=52a0eb2501890a83a608347ccdb9e158}, author = {Jorio, A.a and El Fkihi, S.b and Elbhiri, B.c and Aboutajdine, D.a} } @article { ISI:000362266900001, title = {An Energy-Efficient Clustering Routing Algorithm Based on Geographic Position and Residual Energy for Wireless Sensor Network}, journal = {JOURNAL OF COMPUTER NETWORKS AND COMMUNICATIONS}, year = {2015}, abstract = {Recently wireless sensor network (WSN) has become one of the most interesting networking technologies, since it can be deployed without communication infrastructures. A sensor network is composed of a large number of sensor nodes; these nodes are responsible for supervision of the physical phenomenon and transmission of the periodical results to the base station.
Therefore, improving the energy efficiency and maximizing the networking lifetime are the major challenges in this kind of networks. To deal with this, a hierarchical clustering scheme, called Location-Energy Spectral Cluster Algorithm (LESCA), is proposed in this paper. LESCA determines automatically the number of clusters in a network. It is based on spectral classification and considers both the residual energy and some properties of nodes. In fact, our approach uses the K-ways algorithm and proposes new features of the network nodes such as average energy, distance to BS, and distance to clusters centers in order to determine the clusters and to elect the cluster{\textquoteright}s heads of a WSN. The simulation results show that if the clusters are not constructed in an optimal way and/or the number of the clusters is greater or less than the optimal number of clusters, the total consumed energy of the sensor network per round is increased exponentially.}, issn = {2090-7141}, doi = {10.1155/2015/170138}, author = {Jorio, Ali and El Fkihi, Sanaa and Elbhiri, Brahim and Aboutajdine, Driss} } @article {10906318420150210, title = {An Energy-Efficient Clustering Routing Algorithm Based on Geographic Position and Residual Energy for Wireless Sensor Network.}, journal = {Journal of Computer Networks \& Communications}, volume = {2015}, year = {2015}, pages = {1 - 11}, abstract = {Recently wireless sensor network (WSN) has become one of the most interesting networking technologies, since it can be deployed without communication infrastructures. A sensor network is composed of a large number of sensor nodes; these nodes are responsible for supervision of the physical phenomenon and transmission of the periodical results to the base station. Therefore, improving the energy efficiency and maximizing the networking lifetime are the major challenges in this kind of networks. To deal with this, a hierarchical clustering scheme, called Location-Energy Spectral Cluster Algorithm (LESCA), is proposed in this paper. LESCA determines automatically the number of clusters in a network. It is based on spectral classification and considers both the residual energy and some properties of nodes. In fact, our approach uses the K-ways algorithm and proposes new features of the network nodes such as average energy, distance to BS, and distance to clusters centers in order to deter}, keywords = {Data transmission systems, Energy consumption, Routing algorithms, Wireless sensor networks, Wireless sensor nodes}, issn = {20907141}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=109063184\&site=ehost-live}, author = {Jorio, Ali and El Fkihi, Sanaa and Elbhiri, Brahim and Aboutajdine, Driss} } @conference {Elmouhtadi2015, title = {Fingerprint indexing based barycenter triangulation}, booktitle = {Proceedings of 2015 IEEE World Conference on Complex Systems, WCCS 2015}, year = {2015}, note = {cited By 0}, abstract = {Fingerprint indexing is an efficient technique for improving automatic fingerprint identification systems. In this paper we will present a new index method based on a hierarchical Delaunay triangulation of the minutiae points. The comparison of two fingerprints was calculated by introducing the barycenter notion so as to ensure the exact location of the similar triangles. 
We have performed extensive experiments and comparisons to demonstrate the effectiveness of the proposed approach using a challenging public database (i.e., FVC2000 Db1) which contains small area, low quality fingerprints. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ICoCS.2015.7483263}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978427437\&doi=10.1109\%2fICoCS.2015.7483263\&partnerID=40\&md5=58fb933699f1e58a80089aca6885a7c3}, author = {Elmouhtadi, M.a and Aboutajdine, D.a and El Fkihi, S.b} } @conference { ISI:000380408200011, title = {Flexible Miniaturized UWB CPW II-shaped Slot Antenna for Wireless Body Area Network (WBAN) Applications}, booktitle = {2015 THIRD INTERNATIONAL WORKSHOP ON RFID AND ADAPTIVE WIRELESS SENSOR NETWORKS (RAWSN)}, year = {2015}, note = {3rd International Workshop on RFID and Adaptive Wireless Sensor Networks (RAWSN), Agadir, MOROCCO, MAY 13-15, 2015}, pages = {52-56}, publisher = {FST FES; IEEE Morocco Sect; CNRST; MobiTic; Laboratoire Signaux Systemes Composants; LiMs Laboratoire}, organization = {FST FES; IEEE Morocco Sect; CNRST; MobiTic; Laboratoire Signaux Systemes Composants; LiMs Laboratoire}, abstract = {A flexible microstrip antenna printed on a Kapton Polyimide substrate, excited by a CPW feed line, and operating in the S-band at 3.5 GHz, is successfully validated. Unlike previous flexible antennas, this structure offers a very small thickness (0.16 mm) with overall dimensions of 36x25 mm(2) that allow easy integration into clothing and wireless body area network (WBAN) systems. Modeling and performance evaluation of the proposed antenna in terms of return loss, voltage standing wave ratio, radiation pattern, and current distribution have been carried out using CST-MW STUDIO Software.}, isbn = {978-1-4673-8096-6}, author = {Afyf, Amal and Bellarbi, Larbi and Riouch, Fatima and Achour, Anouar and Errachid, Abdelhamid and Sennouni, Mohamed. Adel}, editor = {Berrada, I and Ghennioui, H} } @conference {Afyf201552, title = {Flexible miniaturized UWB CPW Π-shaped slot antenna for wireless body area network (WBAN) applications}, booktitle = {Proceedings - 2015 3rd International Workshop on RFID and Adaptive Wireless Sensor Networks, RAWSN 2015 - In conjunction with the International Conference on NETworked sYStems, NETYS 2015}, year = {2015}, note = {cited By 0}, pages = {52-56}, abstract = {A flexible microstrip antenna printed on a Kapton Polyimide substrate, excited by a CPW feed line, and operating in the S-band at 3.5 GHz, is successfully validated. Unlike previous flexible antennas, this structure offers a very small thickness (0.16 mm) with overall dimensions of 36{\texttimes}25 mm2 that allow easy integration into clothing and wireless body area network (WBAN) systems. Modeling and performance evaluation of the proposed antenna in terms of return loss, voltage standing wave ratio, radiation pattern, and current distribution have been carried out using CST-MW STUDIO Software.
{\textcopyright} 2015 IEEE.}, doi = {10.1109/RAWSN.2015.7173279}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84966552820\&doi=10.1109\%2fRAWSN.2015.7173279\&partnerID=40\&md5=66469dbc403cd2c742ec86e139da851b}, author = {Afyf, A.a and Bellarbi, L.a and Riouch, F.b and Achour, A.a and Errachid, A.c and Sennouni, M.A.d} } @conference {ElHouari2015, title = {From Big Data to Big Knowledge: The art of making Big Data alive}, booktitle = {Proceedings of 2015 International Conference on Cloud Computing Technologies and Applications, CloudTech 2015}, year = {2015}, note = {cited By 0}, abstract = {Nowadays Big Data becomes one of the biggest buzz concepts in IT world especially with the vertiginous development driving the increase of data encouraged by the emergence of high technologies of storage like cloud computing. Big Data can create efficient challenging solutions in health, security, government and more; and usher in a new era of analytics and decisions. Knowledge Management comprises a set of strategies and practices used to identify, create, represent, distribute, and enable creating experience that can constitute a real immaterial capital. However, to bring significant meaning to the perpetual tsunami of data and manage them, Big Data needs Knowledge Management. In the same way, to broaden the scope of its targeted analyzes, Knowledge Management requires Big Data. Thus, there is a complementary relation between these two major concepts. This paper presents a state of art where we try to explore Big Data within the context of Knowledge Management. We discuss the bi-directional relationship linking this two fundamental concepts and their strategic utility in making analytics valuable especially with the combination of their interactions which create an effective Big Knowledge to build experience. {\textcopyright} 2015 IEEE.}, doi = {10.1109/CloudTech.2015.7337001}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962850767\&doi=10.1109\%2fCloudTech.2015.7337001\&partnerID=40\&md5=44ddf094080e112ff052b4be166062e8}, author = {El Houari, M. and Rhanoui, M. and El Asri, B.} } @conference { ISI:000380407100040, title = {From Big Data to Big Knowledge: The Art of making Big Data Alive}, booktitle = {2015 INTERNATIONAL CONFERENCE ON CLOUD TECHNOLOGIES AND APPLICATIONS (CLOUDTECH 15)}, year = {2015}, note = {International Conference on Cloud Computing Technologies and Applications (CloudTech), Marrakech, MOROCCO, JUN 02-04, 2015}, pages = {289-294}, abstract = {Nowadays Big Data becomes one of the biggest buzz concepts in IT world especially with the vertiginous development driving the increase of data encouraged by the emergence of high technologies of storage like cloud computing. Big Data can create efficient challenging solutions in health, security, government and more; and usher in a new era of analytics and decisions. Knowledge Management comprises a set of strategies and practices used to identify, create, represent, distribute, and enable creating experience that can constitute a real immaterial capital. However, to bring significant meaning to the perpetual tsunami of data and manage them, Big Data needs Knowledge Management. In the same way, to broaden the scope of its targeted analyzes, Knowledge Management requires Big Data. Thus, there is a complementary relation between these two major concepts. This paper presents a state of art where we try to explore Big Data within the context of Knowledge Management. 
We discuss the bi-directional relationship linking this two fundamental concepts and their strategic utility in making analytics valuable especially with the combination of their interactions which create an effective Big Knowledge to build experience.}, isbn = {978-1-4673-8149-9}, author = {El Houari, Meryeme and Rhanoui, Maryem and El Asri, Bouchra} } @conference {Annouch2015, title = {Gas-filling stations procurement problem with time windows using a heterogeneous fleet of full tankers}, booktitle = {2015 10th International Conference on Intelligent Systems: Theories and Applications, SITA 2015}, year = {2015}, note = {cited By 0}, abstract = {In this paper, we address the problem of procurement related to the supply of liquefied petroleum gas (LPG). We are interested in the issue of shipping large quantities of bulk gas (butane and propane) from storage terminals to filling stations storage capacities using a fleet of tankers. The procurement goal is the assignment of quantities of gas to be transferred for each filling station; then, the shipping of these quantities with a heterogeneous fleet of tankers which respects the time schedule of those stations. After a review of literature for the classes of vehicle routing problem with backhauls in oil and gas procurement problems, we will present an integer linear programming formulation (ILP), then, we attempt to solve the problem using a solver, following this by testing and discussing results. {\textcopyright} 2015 IEEE.}, doi = {10.1109/SITA.2015.7358387}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962464691\&doi=10.1109\%2fSITA.2015.7358387\&partnerID=40\&md5=cc982aff48cd7feee13855a979efa66d}, author = {Annouch, A. and Bellabdaoui, A.} } @conference { ISI:000380409500013, title = {Gas-filling stations procurement problem with time windows using a heterogeneous fleet of full tankers}, booktitle = {2015 10TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, year = {2015}, note = {10th International Conference on Intelligent Systems - Theories and Applications (SITA), ENSIAS, Rabat, MOROCCO, OCT 20-21, 2015}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we address the problem of procurement related to the supply of liquefied petroleum gas (LPG). We are interested in the issue of shipping large quantities of bulk gas (butane and propane) from storage terminals to filling stations storage capacities using a fleet of tankers. The procurement goal is the assignment of quantities of gas to be transferred for each filling station; then, the shipping of these quantities with a heterogeneous fleet of tankers which respects the time schedule of those stations. After a review of literature for the classes of vehicle routing problem with backhauls in oil and gas procurement problems, we will present an integer linear programming formulation (ILP), then, we attempt to solve the problem using a solver, following this by testing and discussing results.}, isbn = {978-1-5090-0220-7}, author = {Annouch, Anouar and Bellabdaoui, Adil} } @article {Laghouaouta2015461, title = {A generic traceability framework for model composition operation}, journal = {Lecture Notes in Business Information Processing}, volume = {214}, year = {2015}, note = {cited By 0}, pages = {461-475}, abstract = {In order to handle complexity, model driven engineering aims at building systems by developing several models, where each model represents a specific concern of the system. 
In this context, designers need mechanisms to validate, synchronize and understand interactions between those perspectives. Model composition deals with these issues but remains a complex task. For these reasons, we believe that a strong traceability mechanism is a key factor to handle relationships between models and manage the complexity of the composition operation. This paper describes a generic approach to keep track of the model composition operation. We also define a traces generation process to adapt our proposal to any specific composition language. Finally, an example is presented to illustrate our contributions. {\textcopyright} Springer International Publishing Switzerland 2015}, doi = {10.1007/978-3-319-19237-6_29}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84937468107\&doi=10.1007\%2f978-3-319-19237-6_29\&partnerID=40\&md5=70fd5914c3cd727e75134079e8031585}, author = {Laghouaouta, Y.a and Anwar, A.b and Nassar, M.a and Bruel, J.-M.c} } @article {11406228320150701, title = {The impact of data accuracy on user-perceived business service{\textquoteright}s quality.}, journal = {CISTI (Iberian Conference on Information Systems \& Technologies / Confer{\^e}ncia Ib{\'e}rica de Sistemas e Tecnologias de Informa{\c c}{\~a}o) Proceedings}, volume = {2}, year = {2015}, pages = {145 - 148}, abstract = {As business processes have become increasingly automated, data quality becomes the limiting and penalizing factor in the business service{\textquoteright}s overall quality, and thus impacts customer satisfaction, whether it is an end-user, an institutional partner or a regulatory authority. The available research that is related to business services{\textquoteright} quality paid very little attention to the impact of poor data quality on good services delivery and customer satisfaction, and to the calculation of the optimal level of data quality. The aim of this paper is to present a customer-oriented approach that will help to understand and analyze how an organization business service{\textquoteright}s overall quality is linked to the quality of upstream business processes and of data objects in use. 
This paper also introduces a calculation framework that allows the identification of an optimal level of data quality {\textendash} data accuracy dimension in the case of this paper - taking into account the business processes{\textquoteright} execution accura}, keywords = {accuracy, Business process management {\textendash} Research, Business process outsourcing, Business service and process quality, Consumers {\textendash} Attitudes, Customer satisfaction {\textendash} Research, data quality, Data quality {\textendash} Research, enterprise architecture, user satisfaction}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=114062283\&site=ehost-live}, author = {Belhiah, Meryam and Bounabat, Bouchalb and Achchab, Said} } @conference { ISI:000380620300095, title = {The impact of data accuracy on user-perceived business service{\textquoteright}s quality}, booktitle = {2015 10TH IBERIAN CONFERENCE ON INFORMATION SYSTEMS AND TECHNOLOGIES (CISTI)}, series = {Iberian Conference on Information Systems and Technologies}, year = {2015}, note = {10th Iberian Conference on Information Systems and Technologies (CISTI), Univ Aveiro, Sch Technol \& Agueda Management, Agueda, PORTUGAL, JUN 17-20, 2015}, publisher = {Iberian Assoc Syst Informat Technol}, organization = {Iberian Assoc Syst Informat Technol}, abstract = {As business processes have become increasingly automated, data quality becomes the limiting and penalizing factor in the business service{\textquoteright}s overall quality, and thus impacts customer satisfaction, whether it is an end-user, an institutional partner or a regulatory authority. The available research that is related to business services{\textquoteright} quality paid very little attention to the impact of poor data quality on good services delivery and customer satisfaction, and to the calculation of the optimal level of data quality. The aim of this paper is to present a customer-oriented approach that will help to understand and analyze how an organization business service{\textquoteright}s overall quality is linked to the quality of upstream business processes and of data objects in use. This paper also introduces a calculation framework that allows the identification of an optimal level of data quality - data accuracy dimension in the case of this paper - taking into account the business processes{\textquoteright} execution accuracy and data accuracy.}, isbn = {978-1-4799-8330-8}, issn = {2166-0727}, author = {Belhiah, Meryam and Bounabat, Bouchaib and Achchab, Said}, editor = {Rocha, A and Dias, GP and Martins, A and Reis, LP and Cota, MP} } @conference { ISI:000361005100095, title = {The impact of data accuracy on user-perceived business service{\textquoteright}s quality}, booktitle = {PROCEEDINGS OF THE 2015 10TH IBERIAN CONFERENCE ON INFORMATION SYSTEMS AND TECHNOLOGIES (CISTI 2015)}, year = {2015}, note = {10th Iberian Conference on Information Systems and Technologies (CISTI), Univ Aveiro, Sch Technol \& Agueda Management, Agueda, PORTUGAL, JUN 17-20, 2015}, publisher = {Iberian Assoc Syst Informat Technol}, organization = {Iberian Assoc Syst Informat Technol}, abstract = {As business processes have become increasingly automated, data quality becomes the limiting and penalizing factor in the business service{\textquoteright}s overall quality, and thus impacts customer satisfaction, whether it is an end-user, an institutional partner or a regulatory authority. 
The available research that is related to business services{\textquoteright} quality paid very little attention to the impact of poor data quality on good services delivery and customer satisfaction, and to the calculation of the optimal level of data quality. The aim of this paper is to present a customer-oriented approach that will help to understand and analyze how an organization business service{\textquoteright}s overall quality is linked to the quality of upstream business processes and of data objects in use. This paper also introduces a calculation framework that allows the identification of an optimal level of data quality - data accuracy dimension in the case of this paper - taking into account the business processes{\textquoteright} execution accuracy and data accuracy.}, author = {Belhiah, Meryam and Bounabat, Bouchaib and Achchab, Said}, editor = {Rocha, A and Dias, GP and Martins, A and Reis, LP and Cota, MP} } @conference {Belhiah2015, title = {The impact of data accuracy on user-perceived business service{\textquoteright}s quality}, booktitle = {2015 10th Iberian Conference on Information Systems and Technologies, CISTI 2015}, year = {2015}, note = {cited By 0}, abstract = {As business processes have become increasingly automated, data quality becomes the limiting and penalizing factor in the business service{\textquoteright}s overall quality, and thus impacts customer satisfaction, whether it is an end-user, an institutional partner or a regulatory authority. The available research that is related to business services{\textquoteright} quality paid very little attention to the impact of poor data quality on good services delivery and customer satisfaction, and to the calculation of the optimal level of data quality. The aim of this paper is to present a customer-oriented approach that will help to understand and analyze how an organization business service{\textquoteright}s overall quality is linked to the quality of upstream business processes and of data objects in use. This paper also introduces a calculation framework that allows the identification of an optimal level of data quality - data accuracy dimension in the case of this paper - taking into account the business processes{\textquoteright} execution accuracy and data accuracy. {\textcopyright} 2015 AISTI.}, doi = {10.1109/CISTI.2015.7170445}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84943339073\&doi=10.1109\%2fCISTI.2015.7170445\&partnerID=40\&md5=2f46571af96bd9f471d08ea67fc449f0}, author = {Belhiah, M. and Bounabat, B. and Achchab, S.} } @conference { ISI:000378322500041, title = {Intentional approach to improve the discovery of Web services}, booktitle = {INTERNATIONAL CONFERENCE ON COMPUTER VISION AND IMAGE ANALYSIS APPLICATIONS}, year = {2015}, note = {International Conference on Computer Vision and Image Analysis Applications (ICCVIA), Sousse, TUNISIA, JAN 18-20, 2015}, publisher = {IEEE; IEEE Tunisia Sect}, organization = {IEEE; IEEE Tunisia Sect}, abstract = {Web service discovery is the mechanism of locating a Web Service whose description matches the customer request. The performance of discovery depends on the satisfaction of user intent; however this satisfaction has two main limitations: (1) the huge quantity of information available through the Internet, and (2) mismatch between the low level, technical software-service descriptions, and high level expressions of user needs.
To overcome this problem, we propose in this paper to enrich the service oriented architecture (SOA) by an intentional approach to reduce the distance between the user and the service.}, isbn = {978-1-4799-7186-2}, author = {Alaoui, Sara and El Idrissi, Younes El Bouzekri and Ajhoun, Rachida} } @conference {Alaoui2015, title = {Intentional approach to improve the discovery of Web services}, booktitle = {Proceedings - International Conference on Computer Vision and Image Analysis Applications, ICCVIA 2015}, year = {2015}, note = {cited By 0}, abstract = {Web service discovery is the mechanism of locating a Web Service whose description matches the customer request. The performance of discovery depends on the satisfaction of user intent; however this satisfaction has two main limitations: (1) the huge quantity of information available through the Internet, and (2) mismatch between the low level, technical software-service descriptions, and high level expressions of user needs. To overcome this problem, we propose in this paper to enrich the service oriented architecture (SOA) by an intentional approach to reduce the distance between the user and the service. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ICCVIA.2015.7351906}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84963815464\&doi=10.1109\%2fICCVIA.2015.7351906\&partnerID=40\&md5=1389d9204dd1c1e1b65bd70772c15eb4}, author = {Alaoui, S.a and El Idrissi, Y.E.B.b and Ajhoun, R.a} } @conference { ISI:000383221800021, title = {Iterative Threshold Decoding of Quasi-Cyclic One Step Majority Logic Decodable Codes}, booktitle = {PROCEEDINGS OF THE 2015 5TH WORLD CONGRESS ON INFORMATION AND COMMUNICATION TECHNOLOGIES (WICT)}, year = {2015}, note = {5th World Congress on Information and Communication Technologies (WICT), Marrakesh, MOROCCO, DEC 14-16, 2015}, pages = {117-122}, publisher = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; lab Res Grp Intelligent Machines; MIR Labs}, organization = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; lab Res Grp Intelligent Machines; MIR Labs}, abstract = {This paper presents a new class of Quasi-Cyclic One Step Majority logic codes of 1/2 rate constructed from perfect difference sets. These codes can be encoded with low complexity, and perform very well when decoded with the Iterative threshold decoding algorithm. Most of these codes are a subfamily of LDPC codes and can be decoded using the belief propagation algorithm. A comparison between our results and those for LDPC codes in terms of BER performance is presented.}, isbn = {978-1-4673-8712-5}, author = {Rkizat, Karim and Lahmer, Mohammed and Belkasmi, Mostafa}, editor = {Abrahim, A and Alimi, AM and Haqiq, A and Karray, H and Mousannif, H and BenHalima, M and Choo, YH and Ma, K} } @conference {McHarfi2015192, title = {Measuring the impact of traceability on the cost of Software Product Lines using COPLIMO}, booktitle = {Proceedings of 2015 International Conference on Electrical and Information Technologies, ICEIT 2015}, year = {2015}, note = {cited By 0}, pages = {192-197}, abstract = {Tracing helps assure product quality, especially in large and complex systems such as Software Product Lines, where numerous artifacts and documents need to be linked and managed.
Unfortunately, engineers and project managers rarely accord sufficient attention and importance to this approach or adopt a traceability implementation strategy. This is due to its complexity and to the difficulties faced when implementing trace links, especially the costs that tracing can generate in the short term. As we strongly believe that a quantitative estimation of the impact of traceability on Software Product Line{\textquoteright}s cost can help in better understanding its usefulness, we present in this paper an analysis of this issue based on the COPLIMO cost estimation model. The analysis shows that implementing trace links while developing the Software Product Line generates additional costs, but helps reduce them as the Product Line is used. {\textcopyright} 2015 IEEE.}, doi = {10.1109/EITech.2015.7162966}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84945277932\&doi=10.1109\%2fEITech.2015.7162966\&partnerID=40\&md5=6b5da17de43106300a775fd869ed064e}, author = {Mcharfi, Z. and El Asri, B. and Dehmouch, I. and Kriouile, A.} } @conference { ISI:000375089700036, title = {Measuring the Impact of Traceability on the Cost of Software Product Lines Using COPLIMO}, booktitle = {PROCEEDINGS OF 2015 INTERNATIONAL CONFERENCE ON ELECTRICAL AND INFORMATION TECHNOLOGIES (ICEIT 2015)}, year = {2015}, note = {1st International Conference on Electrical and Information Technologies (ICEIT), Marrakech, MOROCCO, MAR 25-27, 2015}, pages = {192-197}, abstract = {Tracing helps assure product quality, especially in large and complex systems such as Software Product Lines, where numerous artifacts and documents need to be linked and managed. Unfortunately, engineers and project managers rarely accord sufficient attention and importance to this approach or adopt a traceability implementation strategy. This is due to its complexity and to the difficulties faced when implementing trace links, especially the costs that tracing can generate in the short term. As we strongly believe that a quantitative estimation of the impact of traceability on Software Product Line{\textquoteright}s cost can help in better understanding its usefulness, we present in this paper an analysis of this issue based on the COPLIMO cost estimation model. The analysis shows that implementing trace links while developing the Software Product Line generates additional costs, but helps reduce them as the Product Line is used.}, isbn = {978-1-4799-7479-5}, author = {Mcharfi, Zineb and El Asri, Bouchra and Dehmouch, Ram and Kriouile, Abdelaziz}, editor = {Essaaidi, M and ElHani, S} } @book {Ajana201569, title = {Middleware architecture in WSN}, series = {Wireless Sensor and Mobile Ad-Hoc Networks Vehicular and Space Applications}, year = {2015}, note = {cited By 0}, pages = {69-94}, abstract = {Sensors integrated into the environment, machinery, and structures, and coupled with the efficient delivery of sensed information could provide tremendous benefits in a wide range of applications such as improved manufacturing productivity, enhanced homeland security, fewer catastrophic failures, and improved emergency response. The design and development of these applications should address the challenges dictated by Wireless Sensor Network (WSN) characteristics on the one hand and the targeted applications on the other hand. One of the novel emerging approaches used to address these challenges is the design of middleware for WSN.
Middleware refers to distributed software that can bridge the gap and remove impediments between the heterogeneous hardware platform and the backend applications requirements. In recent years, research has been carried out on WSN middleware from different aspects and for different purposes. WSN can be used with other identification technologies such as Radio Frequency Identification (RFID). In an integration system of RFID and WSN, RFID is used to identify objects while WSN can provide context environment information about these objects. This integration increases system intelligence in pervasive computing. This chapter provides a comprehensive review of the existing middleware for WSN, seeking for a better understanding of the current issues and future directions in this field. It also examines the various approaches of middleware design, compares and suggests different types of applications where each approach can be used. Finally, it proposes an enhanced middleware framework; FlexRFID for the integration of RFID and WSN. {\textcopyright} Springer New York 2015.}, doi = {10.1007/978-1-4939-2468-4_4}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84944541066\&doi=10.1007\%2f978-1-4939-2468-4_4\&partnerID=40\&md5=7528b0c1dd36ea99feee4221b272284a}, author = {Ajana, M.E.K.a and Mohammed, E.a b and Boulmalf, M.c} } @conference { ISI:000380409500036, title = {Modeling and Simulation of Web Services Composition Based on MARDS Model.}, booktitle = {2015 10TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, year = {2015}, note = {10th International Conference on Intelligent Systems - Theories and Applications (SITA), ENSIAS, Rabat, MOROCCO, OCT 20-21, 2015}, publisher = {IEEE}, organization = {IEEE}, abstract = {Web services composition is an emerging paradigm for application integration within and across organizations and enterprises. A set of languages and techniques for web services composition has emerged and is continuously being enriched with new proposals from different vendors. Among these languages we remind Business Process Execution Language (BPEL) which emphasizes the importance of modelling and simulating of business process in Web Services composition. Multi-agent Systems (MAS) are composed of autonomous agents that interact and coordinate to achieve their intentions. This makes them particularly adapted to modelling composite and complex information systems. In this paper we propose a new approach for modeling and simulating the process of web services composition using the Multi-Agent Reactive Decisional System (MARDS) Model and BPEL Language.}, isbn = {978-1-5090-0220-7}, author = {Adadi, N. and Berrada, M. and Chenouni, D. and Bounabat, B.} } @conference {Adadi2015, title = {Modeling and simulation of Web Services composition based on MARDS model}, booktitle = {2015 10th International Conference on Intelligent Systems: Theories and Applications, SITA 2015}, year = {2015}, note = {cited By 0}, abstract = {Web services composition is an emerging paradigm for application integration within and across organizations and enterprises. A set of languages and techniques for web services composition has emerged and is continuously being enriched with new proposals from different vendors. Among these languages we remind Business Process Execution Language (BPEL) which emphasizes the importance of modelling and simulating of business process in Web Services composition. 
Multi-agent Systems (MAS) are composed of autonomous agents that interact and coordinate to achieve their intentions. This makes them particularly adapted to modelling composite and complex information systems. In this paper we propose a new approach for modeling and simulating the process of web services composition using the Multi-Agent Reactive Decisional System (MARDS) Model and BPEL Language. {\textcopyright} 2015 IEEE.}, doi = {10.1109/SITA.2015.7358410}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962459570\&doi=10.1109\%2fSITA.2015.7358410\&partnerID=40\&md5=139a7b0bfa27d7407c85a721fd63883e}, author = {Adadi, N.a and Berrada, M.a and Chenouni, D.a and Bounabat, B.b} } @conference {Aghmadi2015, title = {A MTC traffic generation and QCI priority-first scheduling algorithm over LTE}, booktitle = {International Conference on Wireless Networks and Mobile Communications, WINCOM 2015}, year = {2015}, note = {cited By 0}, abstract = {As Machine-To-Machine (M2M) communication continues to grow rapidly, a full study of an overload control approach to manage the data and signaling of H2H traffic from massive MTC devices is required. In this paper, a new M2M resource-scheduling algorithm for Long Term Evolution (LTE) is proposed. It provides a Quality of Service (QoS) guarantee to Guaranteed Bit Rate (GBR) services; we set priorities for the critical M2M services to guarantee the transportation of GBR services, which have high QoS needs. Additionally, we simulate and compare different methods and offer further observations on the solution design. {\textcopyright} 2015 IEEE.}, doi = {10.1109/WINCOM.2015.7381300}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84965013567\&doi=10.1109\%2fWINCOM.2015.7381300\&partnerID=40\&md5=1b75863c24d1fb98bed904b8c7096d6b}, author = {Aghmadi, A.a and Bouksim, I.a and Kobbane, A.a and Taleb, T.b} } @conference { ISI:000380509200003, title = {A MTC Traffic Generation and QCI Priority-First Scheduling Algorithm over LTE}, booktitle = {2015 INTERNATIONAL CONFERENCE ON WIRELESS NETWORKS AND MOBILE COMMUNICATIONS (WINCOM)}, year = {2015}, note = {International conference on wireless networks and mobile communications, Marrakech, MOROCCO, OCT 20-23, 2015}, pages = {11-16}, abstract = {As Machine-To-Machine (M2M) communication continues to grow rapidly, a full study of an overload control approach to manage the data and signaling of H2H traffic from massive MTC devices is required. In this paper, a new M2M resource-scheduling algorithm for Long Term Evolution (LTE) is proposed. It provides a Quality of Service (QoS) guarantee to Guaranteed Bit Rate (GBR) services; we set priorities for the critical M2M services to guarantee the transportation of GBR services, which have high QoS needs. Additionally, we simulate and compare different methods and offer further observations on the solution design.}, isbn = {978-1-4673-8224-3}, author = {Aghmadi, Ali and Bouksim, Iliass and Kobbane, Abdellatif and Taleb, Tarik} } @article {Lakki20151070, title = {Multi-objective dynamic metric and QoS in networks manets}, journal = {Research Journal of Applied Sciences, Engineering and Technology}, volume = {10}, number = {9}, year = {2015}, note = {cited By 0}, pages = {1070-1081}, abstract = {The aim of this study is to find an intelligent parameter based on mobility and clustering. This metric will be integrated into the selection process of MPRs to improve QoS in MANETs.
The unpredictable mobility and the large quantity of traffic generated by each node interface make communication in the network increasingly difficult to manage. Thus, routing protocols need to be adapted to such conditions. In order to make the OLSR protocol more robust and more adaptable to the conditions dictated by the environment of each node, this study proposes a polymorphic metric that changes depending on the network behavior. This metric aims to make the OLSR protocol best suited to each zone. Many simulations are carried out with NS2 to test and prove the validity of this new metric in environments with high mobility and heavy traffic. {\textcopyright} Maxwell Scientific Organization, 2015.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84940775201\&partnerID=40\&md5=be882576c747f7d2d3d54cd08dc7dc91}, author = {Lakki, N.a and Ouacha, A.a and Habbani, A.a b and El Abbadi, J.a} } @conference {Rida2015, title = {Multiview SOA: Extending SOA using a private cloud computing as SaaS}, booktitle = {Proceedings of 2015 International Conference on Cloud Computing Technologies and Applications, CloudTech 2015}, year = {2015}, note = {cited By 0}, abstract = {This work is based on two major areas, the Multiview Service Oriented Architecture and the combination of cloud computing and MVSOA. Thus, it is suggested first to extend the service oriented architecture (SOA) into an architecture called MVSOA by adding two components: the Multiview service generator, whose role is to transform the classic service into a Multiview service, and the database, which stores all of the consumer service information. It is also suggested to combine cloud computing and the Multiview Service Oriented Architecture MV-SOA. To achieve this combination, the MV-SOA architecture was taken and a private cloud as SaaS was added on the client side. {\textcopyright} 2015 IEEE.}, doi = {10.1109/CloudTech.2015.7337016}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962858639\&doi=10.1109\%2fCloudTech.2015.7337016\&partnerID=40\&md5=c6f0e0302d236375153f48610841c06e}, author = {Rida, B. and Ahmed, E.} } @conference { ISI:000375089700008, title = {A new adaptive algorithm for data dissemination in Vehicular Ad hoc Networks}, booktitle = {PROCEEDINGS OF 2015 INTERNATIONAL CONFERENCE ON ELECTRICAL AND INFORMATION TECHNOLOGIES (ICEIT 2015)}, year = {2015}, note = {1st International Conference on Electrical and Information Technologies (ICEIT), Marrakech, MOROCCO, MAR 25-27, 2015}, pages = {36-42}, abstract = {The idea or concept of intelligent traffic management in which data from the TIC (Traffic Information Centre) infrastructures could be reachable at any point is strongly included in Smart cities. Living labs (cities in which newly designed systems can be tested in real conditions) have been created all over Europe to test the possibilities of these future cities. It{\textquoteright}s the new concept where vehicular networks play a relevant role. The Vehicular Ad-hoc Networks VANETs are a sub-class of the most studied Mobile Ad hoc Networks MANET, in which vehicles are considered as mobile nodes. VANETs differ from other MANETs because of their specific characteristics, inducing numerous problems and constraints, which lead us to discuss many issues in VANETs as they are decentralized, self-organized and self-monitoring, with a distributed network nature.
When and if deployed, VANETs will be the most implemented MANET ever and will continue their development over the next years; it is indeed the reason why many issues are of great concern to be studied. Ample research efforts have been devoted to VANETs in the broadcasting domain. In order to have optimal and adaptive approaches, we focus on a number of requirements. In this light, this article will present an overview of VANETs, chiefly broadcasting protocols, with the aim of resolving this relevant issue; it also presents some suggested broadcasting solutions and proposes a new algorithm with the idea of integrating the concept of Intelligent Traffic Lights ITLs in order to solve the core problem, which is how to minimize the number of rebroadcast packets while maintaining good latency and reachability using dynamic parameters from the counter-based, probability-based and distance-based approaches.}, isbn = {978-1-4799-7479-5}, author = {Naja, Assia and Essaaidi, Mohammad and Azzekhmam, Mourad and Boulmalef, Mohammed}, editor = {Essaaidi, M and ElHani, S} } @conference {Naja201536, title = {A new adaptive algorithm for data dissemination in Vehicular Ad hoc Networks}, booktitle = {Proceedings of 2015 International Conference on Electrical and Information Technologies, ICEIT 2015}, year = {2015}, note = {cited By 1}, pages = {36-42}, abstract = {The idea or concept of intelligent traffic management in which data from the TIC (Traffic Information Centre) infrastructures could be reachable at any point is strongly included in Smart cities. Living labs (cities in which newly designed systems can be tested in real conditions) have been created all over Europe to test the possibilities of these future cities. It{\textquoteright}s the new concept where vehicular networks play a relevant role. The Vehicular Ad-hoc Networks VANETs are a sub-class of the most studied Mobile Ad hoc Networks MANET, in which vehicles are considered as mobile nodes. VANETs differ from other MANETs because of their specific characteristics, inducing numerous problems and constraints, which lead us to discuss many issues in VANETs as they are decentralized, self-organized and self-monitoring, with a distributed network nature. When and if deployed, VANETs will be the most implemented MANET ever and will continue their development over the next years; it is indeed the reason why many issues are of great concern to be studied. Ample research efforts have been devoted to VANETs in the broadcasting domain. In order to have optimal and adaptive approaches, we focus on a number of requirements. In this light, this article will present an overview of VANETs, chiefly broadcasting protocols, with the aim of resolving this relevant issue; it also presents some suggested broadcasting solutions and proposes a new algorithm with the idea of integrating the concept of Intelligent Traffic Lights ITLs in order to solve the core problem, which is how to minimize the number of rebroadcast packets while maintaining good latency and reachability using dynamic parameters from the counter-based, probability-based and distance-based approaches.
{\textcopyright} 2015 IEEE.}, doi = {10.1109/EITech.2015.7162992}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84945244354\&doi=10.1109\%2fEITech.2015.7162992\&partnerID=40\&md5=c7f8bcb1358409fdb7406184ebe02d9e}, author = {Naja, A.a and Essaaidi, M.a and Azzekhmam, M.a and Boulmalef, M.b} } @article {Alaoui20151202, title = {New approach for service discovery and prediction based on intentional perspective and recommendation}, journal = {International Review on Computers and Software}, volume = {10}, number = {12}, year = {2015}, note = {cited By 1}, pages = {1202-1208}, abstract = {The goal of the service selection issue is to provide a suitable service to the customer. Selection has been addressed by many approaches to face the richness and variety of services available online. Currently the users cannot find what they want easily, so to overcome this challenge a recommender system is needed. This system is widely applied in many fields such as commercial sites, social networks and service oriented architecture. It allows guiding users in their navigation, based on their previous choices or on those of their neighbors. Despite the advantages of such systems, there are always limitations. We aim through this paper to tilt the angle of recommendation by intermixing user intention and recommendation, in order to improve the selection issue. {\textcopyright} 2015 Praise Worthy Prize S.r.l. - All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959486854\&partnerID=40\&md5=e1f5db5d53e80a5af91dbe5c80d94cd9}, author = {Alaoui, S.a and El Idrissi, Y.E.B.b and Ajhoun, R.a and El Mendili, F.b and Benfaress, C.b} } @conference {Ennasar2015, title = {A new modified S-shaped compact antenna for RFID-UHF tag applications}, booktitle = {Mediterranean Microwave Symposium}, volume = {2015-April}, year = {2015}, note = {cited By 0}, abstract = {In this paper, a new modified S-shaped antenna for RFID-UHF tag applications is presented. To provide a good conjugate matching between the S-shaped tag antenna and the chip, the technique of adding asymmetrical triangular stubs on both sides of the tag chip was applied. The proposed tag antenna, operating in the 915 MHz RFID band, has dimensions of only 51{\texttimes}43 mm2 and an operating bandwidth from 909 to 920 MHz (to cover the US band) under a -10 dB reflection coefficient condition. Furthermore, the measured read range was found to be 0.56 m at 915 MHz. This small value makes the proposed antenna suitable for many RFID-UHF applications, especially those that may involve short reading distances. {\textcopyright} 2014 IEEE.}, doi = {10.1109/MMS.2014.7088941}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84938305261\&doi=10.1109\%2fMMS.2014.7088941\&partnerID=40\&md5=837505d6cad885a56904780f9b0958d2}, author = {Ennasar, M.A.a and Berbia, H.a and Essaaidi, M.a and El Mrabet, O.b and Aznabet, I.b and Aznabet, M.b and Tedjini, S.c} } @conference {Anter2015, title = {Nk-schemas: A novel algorithm for creating the views{\textquoteright} sch{\'e}mas to materialize in hybrid mediator}, booktitle = {Proceedings of 2015 IEEE World Conference on Complex Systems, WCCS 2015}, year = {2015}, note = {cited By 0}, abstract = {The explosion of information and telecommunications technologies has made the access to and production of information easy. Thus, a very large mass of the latter has been generated. This situation has made integration systems an immediate necessity.
Among these systems, there is the hybrid mediator. The latter interrogates one part of data on demand as in the virtual approach, while charging, filtering and storing the second part, as views, in a local database. The creation of this second part is a critical task. We propose in this paper, a new algorithm for creating views{\textquoteright} schemas to materialize in the hybrid integration system. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ICoCS.2015.7483282}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978397930\&doi=10.1109\%2fICoCS.2015.7483282\&partnerID=40\&md5=c2f29f0e70dec9947e07f07a38190ec7}, author = {Anter, S. and Zellou, A. and Idri, A.} } @conference {Adadi2015, title = {Ontology based composition of e-Government services using AI Planning}, booktitle = {2015 10th International Conference on Intelligent Systems: Theories and Applications, SITA 2015}, year = {2015}, note = {cited By 0}, abstract = {A major propelling technology for electronic government (e-Government) is the powerful concept of Semantic Web Service. Semantically enriched Web services promise to increase the level of automation and to reduce integration efforts significantly. On the other hand, and due to the heterogeneous structure of the public sector, the achievement of interoperability and integration is a key challenge for a comprehensive e-Government. Therefore, the combination of e-Government and Semantic Web Services is very much natural. In this paper, we present a dynamic approach for semantically composing e-Government Web services based on Artificial Intelligence (AI) techniques. The overall objective of our approach is to improve the citizen centric e-Government vision by providing a platform for automatically discovering, composing and optimizing e-Government services. {\textcopyright} 2015 IEEE.}, doi = {10.1109/SITA.2015.7358430}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962383271\&doi=10.1109\%2fSITA.2015.7358430\&partnerID=40\&md5=7ef06f1db05a4059c1b4423498a9fc41}, author = {Adadi, A.a and Berrada, M.a and Chenouni, D.a and Bounabat, B.b} } @conference { ISI:000380409500056, title = {Ontology based composition of e-Government services using AI Planning}, booktitle = {2015 10TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, year = {2015}, note = {10th International Conference on Intelligent Systems - Theories and Applications (SITA), ENSIAS, Rabat, MOROCCO, OCT 20-21, 2015}, publisher = {IEEE}, organization = {IEEE}, abstract = {A major propelling technology for electronic government (e-Government) is the powerful concept of Semantic Web Service. Semantically enriched Web services promise to increase the level of automation and to reduce integration efforts significantly. On the other hand, and due to the heterogeneous structure of the public sector, the achievement of interoperability and integration is a key challenge for a comprehensive e-Government. Therefore, the combination of e-Government and Semantic Web Services is very much natural. In this paper, we present a dynamic approach for semantically composing e-Government Web services based on Artificial Intelligence (AI) techniques. 
The overall objective of our approach is to improve the citizen centric e-Government vision by providing a platform for automatically discovering, composing and optimizing e-Government services.}, isbn = {978-1-5090-0220-7}, author = {Adadi, Amina and Berrada, Mohammed and Chenouni, Driss and Bounabat, Bouchaib} } @conference { ISI:000380407100015, title = {An Overview of Variability Management in Cloud Services}, booktitle = {2015 INTERNATIONAL CONFERENCE ON CLOUD TECHNOLOGIES AND APPLICATIONS (CLOUDTECH 15)}, year = {2015}, note = {International Conference on Cloud Computing Technologies and Applications (CloudTech), Marrakech, MOROCCO, JUN 02-04, 2015}, pages = {138-142}, abstract = {The Cloud Computing paradigm has enabled the multitenancy architectural approach in Cloud applications. In a multitenant application, one single instance is shared by many tenants (users). Since each client has its own and particular requirements and context, that are exposed to change in time, the process of realizing customizable and adaptable services becomes complex and difficult to handle. Handling variability has been subject of many research works in the domain of Service Oriented Computing, while less interest was given to variability in Cloud services. Therefore, managing variability in Cloud applications can take benefit from related works in service-oriented computing. In this paper, we present an overview of variability management in Cloud applications, in order to assess the pertinence of the different approaches proposed to tackle this issue.}, isbn = {978-1-4673-8149-9}, author = {Aouzal, Khadija and Hafiddi, Hatim and Dahchour, Mohamed} } @conference {Aouzal2015, title = {An overview of variability management in cloud services}, booktitle = {Proceedings of 2015 International Conference on Cloud Computing Technologies and Applications, CloudTech 2015}, year = {2015}, note = {cited By 0}, abstract = {The Cloud Computing paradigm has enabled the multi-tenancy architectural approach in Cloud applications. In a multi-tenant application, one single instance is shared by many tenants (users). Since each client has its own and particular requirements and context, that are exposed to change in time, the process of realizing customizable and adaptable services becomes complex and difficult to handle. Handling variability has been subject of many research works in the domain of Service Oriented Computing, while less interest was given to variability in Cloud services. Therefore, managing variability in Cloud applications can take benefit from related works in service-oriented computing. In this paper, we present an overview of variability management in Cloud applications, in order to assess the pertinence of the different approaches proposed to tackle this issue. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/CloudTech.2015.7336976}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962861131\&doi=10.1109\%2fCloudTech.2015.7336976\&partnerID=40\&md5=08685792a476e627b2fe63ca5f3f1d63}, author = {Aouzal, K.a and Hafiddi, H.a b and Dahchour, M.a} } @conference { ISI:000380407100019, title = {A pareto-based Artificial Bee Colony and Product Line for Optimizing Scheduling of VM on Cloud Computing}, booktitle = {2015 INTERNATIONAL CONFERENCE ON CLOUD TECHNOLOGIES AND APPLICATIONS (CLOUDTECH 15)}, year = {2015}, note = {International Conference on Cloud Computing Technologies and Applications (CloudTech), Marrakech, MOROCCO, JUN 02-04, 2015}, pages = {159-165}, abstract = {In this paper, we present a task scheduling management approach based on the utility model which is used in economics to represent the needs of both the client and the provider. Indeed, our work copes with two main parameters that affect the broker: the cost of virtual machine instances and their response time. Minimizing those two objectives gives the best quality of service to the customers and offers the broker an important profit. In fact, we consider the virtual machines as a product line and use feature models to represent the virtual machine configurations to select the efficient resources that suit customer requirements while trying at the same time to minimize virtual machine cost. An efficient task scheduling mechanism can not only fit the client{\textquoteright}s requirements, but also improve resource utilization, be aware of the changing environment and try to balance the system. Thus, our work is based on the Artificial Bee Colony algorithm to optimize the scheduling of tasks on virtual machines in cloud computing by analyzing the differences between virtual machine load balancing algorithms.}, isbn = {978-1-4673-8149-9}, author = {Benali, Asmae and El Asri, Bouchra and Kriouile, Houda} } @conference {Benali2015, title = {A pareto-based Artificial Bee Colony and product line for optimizing scheduling of VM on cloud computing}, booktitle = {Proceedings of 2015 International Conference on Cloud Computing Technologies and Applications, CloudTech 2015}, year = {2015}, note = {cited By 0}, abstract = {In this paper, we present a task scheduling management approach based on the utility model which is used in economics to represent the needs of both the client and the provider. Indeed, our work copes with two main parameters that affect the broker: the cost of virtual machine instances and their response time. Minimizing those two objectives gives the best quality of service to the customers and offers the broker an important profit. In fact, we consider the virtual machines as a product line and use feature models to represent the virtual machine configurations to select the efficient resources that suit customer requirements while trying at the same time to minimize virtual machine cost. An efficient task scheduling mechanism can not only fit the client{\textquoteright}s requirements, but also improve resource utilization, be aware of the changing environment and try to balance the system. Thus, our work is based on the Artificial Bee Colony algorithm to optimize the scheduling of tasks on virtual machines in cloud computing by analyzing the differences between virtual machine load balancing algorithms.
{\textcopyright} 2015 IEEE.}, doi = {10.1109/CloudTech.2015.7336980}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962909836\&doi=10.1109\%2fCloudTech.2015.7336980\&partnerID=40\&md5=2a3ad1b3ec1a9d144ab4537c9de687dd}, author = {Benali, A. and El Asri, B. and Kriouile, H.} } @conference { ISI:000380408200016, title = {Performance analysis of coding using Distributed Turbo Codes}, booktitle = {2015 THIRD INTERNATIONAL WORKSHOP ON RFID AND ADAPTIVE WIRELESS SENSOR NETWORKS (RAWSN)}, year = {2015}, note = {3rd International Workshop on RFID and Adaptive Wireless Sensor Networks (RAWSN), Agadir, MOROCCO, MAY 13-15, 2015}, pages = {78-81}, publisher = {FST FES; IEEE Morocco Sect; CNRST; MobiTic; Laboratoire Signaux Systemes Composants; LiMs Laboratoire}, organization = {FST FES; IEEE Morocco Sect; CNRST; MobiTic; Laboratoire Signaux Systemes Composants; LiMs Laboratoire}, abstract = {Nowadays the need for fast and reliable communication is increasing, which leads us to look for new ways to enhance channel coding. In this paper, we study the case of distributed coding between two users that aim to transmit data to a common destination, where each user transmits partial redundancy to the destination and relies on the second user for the remainder. The purpose of distributing the redundancy creation and transmission is to benefit from each user{\textquoteright}s channel quality for more accurate decoding. In the context of our analysis, we use a 1/2-rate convolutional code between users and a distributed Turbo code for transmission to the destination. This study aims to highlight the different key factors, as well as the advantage of choosing a distributed encoding scheme.}, isbn = {978-1-4673-8096-6}, author = {Aboudeine, Anas and Ayoub, Fouad and Yatribi, Anouar and Benattou, Mohammed}, editor = {Berrada, I and Ghennioui, H} } @conference {Aboudeine201578, title = {Performance analysis of coding using distributed Turbo codes}, booktitle = {Proceedings - 2015 3rd International Workshop on RFID and Adaptive Wireless Sensor Networks, RAWSN 2015 - In conjunction with the International Conference on NETworked sYStems, NETYS 2015}, year = {2015}, note = {cited By 0}, pages = {78-81}, abstract = {Nowadays the need for fast and reliable communication is increasing, which leads us to look for new ways to enhance channel coding. In this paper, we study the case of distributed coding between two users that aim to transmit data to a common destination, where each user transmits partial redundancy to the destination and relies on the second user for the remainder. The purpose of distributing the redundancy creation and transmission is to benefit from each user{\textquoteright}s channel quality for more accurate decoding. In the context of our analysis, we use a 1/2-rate convolutional code between users and a distributed Turbo code for transmission to the destination. This study aims to highlight the different key factors, as well as the advantage of choosing a distributed encoding scheme. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/RAWSN.2015.7173284}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84966679913\&doi=10.1109\%2fRAWSN.2015.7173284\&partnerID=40\&md5=4bdc90c77449a173f3b9f74b2bb43e0e}, author = {Aboudeine, A.a and Ayoub, F.b and Yatribi, A.c and Benattou, M.a} } @conference { ISI:000383221800017, title = {A preliminary study on indicator framework for enterprise, based on COBIT 5 processes and SOA approach}, booktitle = {PROCEEDINGS OF THE 2015 5TH WORLD CONGRESS ON INFORMATION AND COMMUNICATION TECHNOLOGIES (WICT)}, year = {2015}, note = {5th World Congress on Information and Communication Technologies (WICT), Marrakesh, MOROCCO, DEC 14-16, 2015}, pages = {95-100}, publisher = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; Lab Res Grp Intelligent Machines; MIR Labs}, organization = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; Lab Res Grp Intelligent Machines; MIR Labs}, abstract = {Organizations become dependent on Information Technology (IT) to fulfill their corporate aims, meet their business needs and deliver value to customers. For effective and efficient utilization of IT, we intend to contribute to the alignment between IT and organizational strategies. The challenge is to make the enterprise and its Information System (IS) as responsive as possible within the regard to changes in enterprise while taking into account the enterprise operational performance. When a change happens or a new opportunity comes, organizations do not know which assets are linked to which business processes and which services, especially IT services, to bring up first and which can wait until later. Our principal aim is to propose an indicator framework to track and control business-IT alignment. However, this paper is a preliminary study on this framework. We are inspired by COBIT 5 processes as indicators and believe that the Service-Oriented Architecture (SOA) applied to these processes interlinks and can interact well the different processes. The validity and applicability of our theoretical study will be evaluated for future work.}, isbn = {978-1-4673-8712-5}, author = {Razafimampianina, Rindra M. and Nouinou, Soumaya and Doukkali, Abdelaziz S. and Regragui, Boubker}, editor = {Abrahim, A and Alimi, AM and Haqiq, A and Karray, H and Mousannif, H and BenHalima, M and Choo, YH and Ma, K} } @conference { ISI:000373736100045, title = {Product Opinion Mining for Competitive Intelligence}, booktitle = {INTERNATIONAL CONFERENCE ON ADVANCED WIRELESS INFORMATION AND COMMUNICATION TECHNOLOGIES (AWICT 2015)}, series = {Procedia Computer Science}, volume = {73}, year = {2015}, note = {International Conference on Advanced Wireless Information and Communication Technologies (AWICT), Natl Sch Engineers Sousse, TUNISIA, OCT 05-07, 2015}, pages = {358-365}, abstract = {Competitive Intelligence is one of the keys of companies Risk Management. It provides the company with a permanent lighting to its competitive environment. The increasingly frequent use of Information and Communication Technologies (ICT); including (namely) online shopping sites, blogs, social network sites, forums, provides incentives for companies check their advantages over their competitors. This information presents a new source that helps and leads the company to identify, analyze and manage the various risks associated with its business/products. 
Nowadays, a good use of these data helps the company to improve its products/services. In this paper, an overview of opinion mining for competitive intelligence will be presented. We{\textquoteright}ll try to synthesize the major research done for the different steps of product opinion mining. (C) 2015 The Authors. Published by Elsevier B.V.}, issn = {1877-0509}, doi = {10.1016/j.procs.2015.12.004}, author = {Amarouche, Kamal and Benbrahim, Houda and Kassou, Ismail}, editor = {Boubiche, DE and Hidoussi, F and Cruz, HT} } @conference {Amarouche2015358, title = {Product Opinion Mining for Competitive Intelligence}, booktitle = {Procedia Computer Science}, volume = {73}, year = {2015}, note = {cited By 1}, pages = {358-365}, abstract = {Competitive Intelligence is one of the keys of companies Risk Management. It provides the company with a permanent lighting to its competitive environment. The increasingly frequent use of Information and Communication Technologies (ICT); including (namely) online shopping sites, blogs, social network sites, forums, provides incentives for companies check their advantages over their competitors. This information presents a new source that helps and leads the company to identify, analyze and manage the various risks associated with its business/products. Nowadays, a good use of these data helps the company to improve its products/services. In this paper, an overview of opinion mining for competitive intelligence will be presented. We{\textquoteright}ll try to synthesize the major research done for the different steps of product opinion mining. {\textcopyright} 2015 The Authors.}, doi = {10.1016/j.procs.2015.12.004}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962678879\&doi=10.1016\%2fj.procs.2015.12.004\&partnerID=40\&md5=6d1bc9b3ded99bfaf39408391296ee1d}, author = {Amarouche, K. and Benbrahim, H. and Kassou, I.} } @conference {Idri2015976, title = {RBFN network based models for estimating software development effort: A cross-validation study}, booktitle = {Proceedings - 2015 IEEE Symposium Series on Computational Intelligence, SSCI 2015}, year = {2015}, note = {cited By 0}, pages = {976-983}, abstract = {Software effort estimation is very crucial and there is always a need to improve its accuracy as much as possible. Several estimation techniques have been developed in this regard and it is difficult to determine which model gives more accurate estimation on which dataset. Among all proposed methods, the Radial Basis Function Neural (RBFN) networks models have presented promising results in software effort estimation. The main objective of this research is to evaluate the RBFN networks construction based on both hard and fuzzy C-means clustering algorithms using cross-validation approach. The objective of this replication study is to investigate if the RBFN-based models learned from the training data are able to estimate accurately the efforts of yet unseen data. This evaluation uses two historical datasets, namely COCOMO81 and ISBSG R8. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/SSCI.2015.142}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964949962\&doi=10.1109\%2fSSCI.2015.142\&partnerID=40\&md5=af0dde3af393745967961e241dd05385}, author = {Idri, A.a and Hassani, A.a and Abran, A.b} } @conference { ISI:000380431500128, title = {RBFN Networks-based Models for Estimating Software Development Effort: A Cross-validation Study}, booktitle = {2015 IEEE SYMPOSIUM SERIES ON COMPUTATIONAL INTELLIGENCE (IEEE SSCI)}, year = {2015}, note = {IEEE Symposium Series Computational Intelligence, Cape Town, SOUTH AFRICA, DEC 07-10, 2015}, pages = {925-932}, publisher = {IEEE; IEEE Computational Intelligence Soc; IEEE BigData}, organization = {IEEE; IEEE Computational Intelligence Soc; IEEE BigData}, abstract = {Software effort estimation is very crucial and there is always a need to improve its accuracy as much as possible. Several estimation techniques have been developed in this regard and it is difficult to determine which model gives more accurate estimation on which dataset. Among all proposed methods, the Radial Basis Function Neural (RBFN) networks models have presented promising results in software effort estimation. The main objective of this research is to evaluate the RBFN networks construction based on both hard and fuzzy C-means clustering algorithms using cross-validation approach. The objective of this replication study is to investigate if the RBFN-based models learned from the training data are able to estimate accurately the efforts of yet unseen data. This evaluation uses two historical datasets, namely COCOMO81 and ISBSG R8.}, isbn = {978-1-4799-7560-0}, doi = {10.1109/SSCI.2015.136}, author = {Idri, Ali and Hassani, Aya and Abran, Alain} } @conference { ISI:000380431500135, title = {RBFN Networks-based Models for Estimating Software Development Effort: A Cross-validation Study}, booktitle = {2015 IEEE SYMPOSIUM SERIES ON COMPUTATIONAL INTELLIGENCE (IEEE SSCI)}, year = {2015}, note = {IEEE Symposium Series Computational Intelligence, Cape Town, SOUTH AFRICA, DEC 07-10, 2015}, pages = {976-983}, publisher = {IEEE; IEEE Computational Intelligence Soc; IEEE BigData}, organization = {IEEE; IEEE Computational Intelligence Soc; IEEE BigData}, abstract = {Software effort estimation is very crucial and there is always a need to improve its accuracy as much as possible. Several estimation techniques have been developed in this regard and it is difficult to determine which model gives more accurate estimation on which dataset. Among all proposed methods, the Radial Basis Function Neural (RBFN) networks models have presented promising results in software effort estimation. The main objective of this research is to evaluate the RBFN networks construction based on both hard and fuzzy C-means clustering algorithms using cross-validation approach. The objective of this replication study is to investigate if the RBFN-based models learned from the training data are able to estimate accurately the efforts of yet unseen data. 
This evaluation uses two historical datasets, namely COCOMO81 and ISBSG R8.}, isbn = {978-1-4799-7560-0}, doi = {10.1109/SSCI.2015.142}, author = {Idri, Ali and Hassani, Aya and Abran, Alain} } @conference {Idri2015925, title = {RBFN networks-based models for estimating software development effort: A cross-validation study}, booktitle = {Proceedings - 2015 IEEE Symposium Series on Computational Intelligence, SSCI 2015}, year = {2015}, note = {cited By 0}, pages = {925-932}, abstract = {Software effort estimation is very crucial and there is always a need to improve its accuracy as much as possible. Several estimation techniques have been developed in this regard and it is difficult to determine which model gives more accurate estimation on which dataset. Among all proposed methods, the Radial Basis Function Neural (RBFN) networks models have presented promising results in software effort estimation. The main objective of this research is to evaluate the RBFN networks construction based on both hard and fuzzy C-means clustering algorithms using cross-validation approach. The objective of this replication study is to investigate if the RBFN-based models learned from the training data are able to estimate accurately the efforts of yet unseen data. This evaluation uses two historical datasets, namely COCOMO81 and ISBSG R8. {\textcopyright} 2015 IEEE.}, doi = {10.1109/SSCI.2015.136}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964998599\&doi=10.1109\%2fSSCI.2015.136\&partnerID=40\&md5=ff08a732a997a0619f3c654c70abf5eb}, author = {Idri, A.a and Hassani, A.b and Abran, A.a} } @conference {Mcharfi2015463, title = {Return on investment of software product line traceability in the short, mid and long term}, booktitle = {ICEIS 2015 - 17th International Conference on Enterprise Information Systems, Proceedings}, volume = {2}, year = {2015}, note = {cited By 1}, pages = {463-468}, abstract = {Several works discuss tracing in Software Product Lines from a technical and architectural points of view, by proposing methods to implement traceability in the system. However, before discussing this field of traceability, we first need to prove the profitability of integrating such approach in the Product Line. Therefore, we bring in this paper a quantitative analysis on how traceability can impact the Return on Investment of a Software Product Line, and in which conditions, in terms of number of products and SPL phase, can tracing be profitable. We compare the results of a generic Software Product Line estimation model, COPLIMO, and our model METra-SPL. Our analysis shows that introducing traceability costs when constructing the Product Line, but can be profit making in the long term, especially in maintenance phase, starting from 2 products to generate. Copyright {\textcopyright} 2015 SCITEPRESS - Science and Technology Publications.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84939540877\&partnerID=40\&md5=096733b1c1fcce03456eb2b99341140b}, author = {Mcharfi, Z. and El Asri, B. and Dehmouch, I. and Baya, A. and Kriouile, A.} } @conference {L{\textquoteright}Amrani201556, title = {The security of web services: Secure communication and identity management}, booktitle = {CEUR Workshop Proceedings}, volume = {1580}, year = {2015}, note = {cited By 0}, pages = {56-60}, abstract = {Service Oriented Architectures have become the new trend in the world of communication on the web. Especially web services are the high-performance specification of service-oriented architectures. 
The use of confidential data on the Web has become the primary problem in secure communication over the web. The solution proposed in this paper is a secure communication tool, OCS, based on the principles of the SAML standard and Single Sign-On. Our solution proposes a new approach which combines the strong points of the SAML standard and the single sign-on method. The implementation of this approach takes the form of a platform or a tool which provides secure communication between web services. A future approach will go beyond the level of authentication to address the level of access control and, as a further step, will prepare an evaluation of the most important technologies which provide Single Sign-On and a secure communication context between heterogeneous web services.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84977540481\&partnerID=40\&md5=947fc7d5e6948e0dbe24131531e28dc1}, author = {L{\textquoteright}Amrani, H.a and El Bouzekri El Idrissi, Y.b and Ajhoun, R.a} } @conference {Alouane2015, title = {Security, privacy and trust in cloud computing: A comparative study}, booktitle = {Proceedings of 2015 International Conference on Cloud Computing Technologies and Applications, CloudTech 2015}, year = {2015}, note = {cited By 0}, abstract = {"Use the computer without a computer," this is the dream of all managers who believe in "Cloud Computing". The Cloud has become a dream and an obsession for all fans of the computer because of the many benefits this service offers: availability of services and data is ensured, cost is relative to consumption, ease of deployment, technical infrastructure is adaptable to the volume of business activity, its convenience to the common business applications (CRM, HR, BI, ERP, mail, etc.), and the fact that this service provides a business function and not the technical components requiring computer skills. Besides these advantages, there are serious risks related to the use of Cloud computing, such as: temporary or permanent loss of data, security of data, and lack of traceability and accountability. These risks are the main challenges faced while adopting a Cloud computing architecture. In this paper, we studied the literature focusing on three major notions of collaborative systems in Cloud computing: Security, privacy and trust. That{\textquoteright}s why we will try to bring out the main requirements regarding these three concepts from both points of view (user{\textquoteright}s and provider{\textquoteright}s), before presenting some related approaches which treat these three concepts. {\textcopyright} 2015 IEEE.}, doi = {10.1109/CloudTech.2015.7336995}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962795004\&doi=10.1109\%2fCloudTech.2015.7336995\&partnerID=40\&md5=18b4f331589250a98e192c8087786f17}, author = {Alouane, M. and El Bakkali, H.} } @conference { ISI:000380407100034, title = {Security, Privacy and Trust in Cloud Computing: A Comparative Study}, booktitle = {2015 INTERNATIONAL CONFERENCE ON CLOUD TECHNOLOGIES AND APPLICATIONS (CLOUDTECH 15)}, year = {2015}, note = {International Conference on Cloud Computing Technologies and Applications (CloudTech), Marrakech, MOROCCO, JUN 02-04, 2015}, pages = {23-30}, abstract = {{\textquoteleft}{\textquoteleft}Use the computer without a computer,{{\textquoteright}{\textquoteright}} this is the dream of all managers who believe in {\textquoteleft}{\textquoteleft}Cloud Computing{{\textquoteright}{\textquoteright}}. 
The Cloud has become a dream and an obsession for all fans of the computer because of the many benefits this service offers: availability of services and data is ensured, cost is relative to consumption, ease of deployment, technical infrastructure is adaptable to the volume of business activity, its convenience to the common business applications (CRM, HR, BI, ERP, mail, etc.), and the fact that this service provides a business function and not the technical components requiring computer skills. Besides these advantages, there are serious risks related to the use of Cloud computing, such as: temporary or permanent loss of data, security of data, and lack of traceability and accountability. These risks are the main challenges faced while adopting a Cloud computing architecture. In this paper, we studied the literature focusing on three major notions of collaborative systems in Cloud computing: Security, privacy and trust. That{\textquoteright}s why we will try to bring out the main requirements regarding these three concepts from both points of view (user{\textquoteright}s and provider{\textquoteright}s), before presenting some related approaches which treat these three concepts.}, isbn = {978-1-4673-8149-9}, author = {Alouane, Meryeme and El Bakkali, Hanan} } @conference {Younes2015115, title = {Segmentation of Arabic Handwritten Text to Lines}, booktitle = {Procedia Computer Science}, volume = {73}, year = {2015}, note = {cited By 0}, pages = {115-121}, abstract = {Automatic recognition of writing is among the most important axes in NLP (Natural Language Processing). Several entities from different areas have demonstrated the need for recognition of handwritten Arabic characters, particularly banks for check processing, post offices for the automation of mail sorting, insurance companies for the processing of forms, and many other industries. One of the most important operations in a handwriting recognition system is segmentation. Segmentation of handwritten text is a necessary step in the development of a system of automatic writing recognition. Its goal is to extract all the areas of the lines of the text, and this operation is made difficult, in the case of handwriting, by the presence of irregular gaps or overlap between lines and by fluctuations in the orientation of the writing relative to the horizontal. In this paper, we have developed three approaches to handwritten Arabic text segmentation and then compared these three approaches. {\textcopyright} 2015 The Authors.}, doi = {10.1016/j.procs.2015.12.056}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962654008\&doi=10.1016\%2fj.procs.2015.12.056\&partnerID=40\&md5=36628c8cc1ed417c96dd04a97f7a2623}, author = {Younes, M.a and Abdellah, Y.b} } @article {Adadi2015460, title = {A semantic web service composition for E-Government services}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {71}, number = {3}, year = {2015}, note = {cited By 0}, pages = {460-467}, abstract = {Emerging from e-business ideas in the late 1990s, e-Government is seen as a concept that is focused on fully exploiting Internet and information and communication technologies to deliver effective government services to citizens, businesses, and other stakeholders. 
However, e-Government has some specific features as opposed to traditional e-business scenarios, because of the enormous challenges it faces in achieving interoperability, integration and security, which are of interest to our dynamic Web service composition research and on which a semantic Web service composition architecture could be properly demonstrated. For these reasons, we propose in this paper an approach for the composition of semantically described e-Government Web services, enabling citizens to dynamically compose services according to their goals and through a single point of access. {\textcopyright} 2005 - 2015 JATIT \& LLS. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84921987930\&partnerID=40\&md5=7328572edfa35d36ddcd880d57061a60}, author = {Adadi, A.a b and Berrada, M.a b and Chenouni, D.a b and Bounabat, B.a b} } @conference {Talei2015, title = {Smart campus microgrid: Advantages and the main architectural components}, booktitle = {Proceedings of 2015 IEEE International Renewable and Sustainable Energy Conference, IRSEC 2015}, year = {2015}, note = {cited By 0}, abstract = {With a worldwide increase in electricity demand, smart grids emerged as a solution for various problems within the current electrical grid. Some novel aspects of smart grids are the use of renewable energy sources, energy storage and also allowing the user to take some decisions to control energy use. Microgrids are building blocks of smart grids and, given that academic campuses are significant contributors to energy consumption, their energy consumption can be efficiently controlled by using an energy management system. This paper focuses on the importance of smart campuses with an emphasis on the importance of an energy management system. {\textcopyright} 2015 IEEE.}, doi = {10.1109/IRSEC.2015.7455093}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84968584100\&doi=10.1109\%2fIRSEC.2015.7455093\&partnerID=40\&md5=0b0289ab22afeb7da6d364c794b3c54a}, author = {Talei, H.a and Zizi, B.a and Abid, M.R.a and Essaaidi, M.b and Benhaddou, D.c and Khalil, N.c} } @conference { ISI:000382162300171, title = {Smart Campus Microgrid: Advantages and the Main Architectural Components}, booktitle = {PROCEEDINGS OF 2015 3RD IEEE INTERNATIONAL RENEWABLE AND SUSTAINABLE ENERGY CONFERENCE (IRSEC{\textquoteright}15)}, year = {2015}, note = {3rd IEEE International Renewable and Sustainable Energy Conference (IRSEC), Marrakech, MOROCCO, DEC 10-13, 2015}, pages = {810-816}, publisher = {IEEE}, organization = {IEEE}, abstract = {With a worldwide increase in electricity demand, smart grids emerged as a solution for various problems within the current electrical grid. Some novel aspects of smart grids are the use of renewable energy sources, energy storage and also allowing the user to take some decisions to control energy use. Microgrids are building blocks of smart grids and, given that academic campuses are significant contributors to energy consumption, their energy consumption can be efficiently controlled by using an energy management system. 
This paper focuses on the importance of smart campuses with an emphasis on the importance of an energy management system.}, isbn = {978-1-4673-7894-9}, author = {Talei, Hanaa and Zizi, Badr and Abid, Mohamed Riduan and Essaaidi, Mohamed and Benhaddou, Driss and Khalil, Nacer}, editor = {Essaaidi, M and Zaz, Y} } @conference { ISI:000380509200011, title = {Soft-decision Decoding by a compact Genetic Algorithm using higher selection pressure}, booktitle = {2015 INTERNATIONAL CONFERENCE ON WIRELESS NETWORKS AND MOBILE COMMUNICATIONS (WINCOM)}, year = {2015}, note = {International conference on wireless networks and mobile communications, Marrakech, MOROCCO, OCT 20-23, 2015}, pages = {58-63}, abstract = {Selection pressure controls the selection of individuals from one population to the next. It gives individuals of higher quality, higher probability of being used to create the next generation, and so for the algorithm to focus on promising regions in the search space. This paper introduces higher selection pressure to soft-decision decoding based on compact Genetic Algorithms(cGAD), and shows the effectiveness of tournament selection in the performances of the decoder. The effect of tournament size is also studied, and the new decoder based on higher selection pressure outperforms Chase-2 algorithm.}, isbn = {978-1-4673-8224-3}, author = {Berkani, Ahlam and Azouaoui, Ahmed and Belkasmi, Mostafa} } @conference {Berkani2015, title = {Soft-decision decoding by a compact genetic algorithm using higher selection pressure}, booktitle = {International Conference on Wireless Networks and Mobile Communications, WINCOM 2015}, year = {2015}, note = {cited By 1}, abstract = {Selection pressure controls the selection of individuals from one population to the next. It gives individuals of higher quality, higher probability of being used to create the next generation, and so for the algorithm to focus on promising regions in the search space. This paper introduces higher selection pressure to soft-decision decoding based on compact Genetic Algorithms(cGAD), and shows the effectiveness of tournament selection in the performances of the decoder. The effect of tournament size is also studied, and the new decoder based on higher selection pressure outperforms Chase-2 algorithm. {\textcopyright} 2015 IEEE.}, doi = {10.1109/WINCOM.2015.7381308}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84965013753\&doi=10.1109\%2fWINCOM.2015.7381308\&partnerID=40\&md5=b9dbc3ea41fe5d23850cbcb72d0df592}, author = {Berkani, A.a and Azouaoui, A.b and Belkasmi, M.a} } @article {Faizi201532, title = {Students{\textquoteright} perceptions towards using Web 2.0 technologies in education}, journal = {International Journal of Emerging Technologies in Learning}, volume = {10}, number = {6}, year = {2015}, note = {cited By 0}, pages = {32-36}, abstract = {The purpose of this paper is to evaluate the usefulness of Web 2.0 technologies in education. For this purpose, a survey was carried out to explore the students{\textquoteright} perceptions towards using these tools for learning purposes. Results of the research study revealed that all the respondents are greatly immersed in these social platforms and use them for many reasons. However, it was found out that almost half of the surveyed students (i.e. 47\%) devote more than 40\% of the time they spend on Web 2.0 technologies to enhance their learning in different subjects. 
Taking these findings into consideration, we can stipulate that Web 2.0 applications present many educational advantages for students, hence, contribute in providing opportunities for further learning. Consequently, these online tools provide schools and universities with more opportunities to go beyond traditional delivery formats and develop learnercentered personalized learning environments.}, doi = {10.3991/ijet.v10i6.4858}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84952788796\&doi=10.3991\%2fijet.v10i6.4858\&partnerID=40\&md5=c22befa5ff5f3e9fcbbfe17e466aeee9}, author = {Faizi, R. and Chiheb, R. and El Afia, A.} } @article { ISI:000366991600005, title = {Students{\textquoteright} Perceptions Towards Using Web 2.0 Technologies in Education}, journal = {INTERNATIONAL JOURNAL OF EMERGING TECHNOLOGIES IN LEARNING}, volume = {10}, number = {6}, year = {2015}, pages = {32-36}, abstract = {The purpose of this paper is to evaluate the usefulness of Web 2.0 technologies in education. For this purpose, a survey was carried out to explore the students{\textquoteright} perceptions towards using these tools for learning purposes. Results of the research study revealed that all the respondents are greatly immersed in these social platforms and use them for many reasons. However, it was found out that almost half of the surveyed students (i.e. 47\%) devote more than 40\% of the time they spend on Web 2.0 technologies to enhance their learning in different subjects. Taking these findings into consideration, we can stipulate that Web 2.0 applications present many educational advantages for students, hence, contribute in providing opportunities for further learning. Consequently, these online tools provide schools and universities with more opportunities to go beyond traditional delivery formats and develop learnercentered personalized learning environments.}, issn = {1868-8799}, doi = {10.3991/ijet.v10i6.4858}, author = {Faizi, Rdouan and Chiheb, Raddouane and El Afia, Abdellatif} } @conference { ISI:000380439700114, title = {Systematic Mapping Study of Missing Values Techniques in Software Engineering Data}, booktitle = {2015 16TH IEEE/ACIS INTERNATIONAL CONFERENCE ON SOFTWARE ENGINEERING, ARTIFICIAL INTELLIGENCE, NETWORKING AND PARALLEL/DISTRIBUTED COMPUTING (SNPD)}, year = {2015}, note = {16th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD), Takamatsu, JAPAN, JUN 01-03, 2015}, pages = {671-678}, publisher = {IEEE; ACIS}, organization = {IEEE; ACIS}, abstract = {Missing Values (MV) present a serious problem facing research in software engineering (SE) which is mainly based on statistical and/or data mining analysis of SE data. The simple method of dealing with MV is to ignore data with missing observations. This leads to losing valuable information and then obtaining biased results. Therefore, various techniques have been developed to deal adequately with MV, especially those based on imputation methods. In this paper, a systematic mapping study was carried out to summarize the existing techniques dealing with MV in SE datasets and to classify the selected studies according to six classification criteria: research type, research approach, MV technique, MV type, data types and MV objective. Publication channels and trends were also identified. As results, 35 papers concerning MV treatments of SE data were selected. 
This study shows an increasing interest in machine learning (ML) techniques especially the K-nearest neighbor algorithm (KNN) to deal with MV in SE datasets and found that most of the MV techniques are used to serve software development effort estimation techniques.}, isbn = {978-1-4799-8676-7}, author = {Idri, Ali and Abnane, Ibtissam and Abran, Alain}, editor = {Saisho, K} } @conference {Idri2015, title = {Systematic mapping study of missing values techniques in software engineering data}, booktitle = {2015 IEEE/ACIS 16th International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing, SNPD 2015 - Proceedings}, year = {2015}, note = {cited By 0}, abstract = {Missing Values (MV) present a serious problem facing research in software engineering (SE) which is mainly based on statistical and/or data mining analysis of SE data. The simple method of dealing with MV is to ignore data with missing observations. This leads to losing valuable information and then obtaining biased results. Therefore, various techniques have been developed to deal adequately with MV, especially those based on imputation methods. In this paper, a systematic mapping study was carried out to summarize the existing techniques dealing with MV in SE datasets and to classify the selected studies according to six classification criteria: research type, research approach, MV technique, MV type, data types and MV objective. Publication channels and trends were also identified. As results, 35 papers concerning MV treatments of SE data were selected. This study shows an increasing interest in machine learning (ML) techniques especially the K-nearest neighbor algorithm (KNN) to deal with MV in SE datasets and found that most of the MV techniques are used to serve software development effort estimation techniques. {\textcopyright} 2015 IEEE.}, doi = {10.1109/SNPD.2015.7176280}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84947080832\&doi=10.1109\%2fSNPD.2015.7176280\&partnerID=40\&md5=d82d376ab83550a21470787ee8a42f45}, author = {Idri, A.a and Abnane, I.a and Abran, A.b} } @conference {Fath-Allah2015, title = {A theoretical e-government portals{\textquoteright} benchmarking framework}, booktitle = {2015 10th International Conference on Intelligent Systems: Theories and Applications, SITA 2015}, year = {2015}, note = {cited By 0}, abstract = {E-government benchmarking is the process of ranking e-government according to some agreed best practices. It can be used not only to benchmark but also to assess achievements and identify missing best practices for stakeholders. The purpose of this paper is to propose guidelines to build a new benchmarking framework for e-government portals. This framework is based on measurement of best practices using a best practice model. For this purpose, we have first identified and presented five examples of the benchmarking frameworks available in the literature. Based on the conducted comparison, the findings show that although the benchmarking frameworks are serving their intended purposes, they still suffer from some limitations. The paper also explains how the new framework overcomes these limitations. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/SITA.2015.7358379}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962415171\&doi=10.1109\%2fSITA.2015.7358379\&partnerID=40\&md5=b708ecadda763851cd774d01e3b2cd31}, author = {Fath-Allah, A.a and Cheikhi, L.a and Al-Qutaish, R.E.b and Idri, A.a} } @conference { ISI:000380409500005, title = {A Theoretical E-government Portals{\textquoteright} Benchmarking Framework}, booktitle = {2015 10TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, year = {2015}, note = {10th International Conference on Intelligent Systems - Theories and Applications (SITA), ENSIAS, Rabat, MOROCCO, OCT 20-21, 2015}, publisher = {IEEE}, organization = {IEEE}, abstract = {E-government benchmarking is the process of ranking e-government according to some agreed best practices. It can be used not only to benchmark but also to assess achievements and identify missing best practices for stakeholders. The purpose of this paper is to propose guidelines to build a new benchmarking framework for e-government portals. This framework is based on measurement of best practices using a best practice model. For this purpose, we have first identified and presented five examples of the benchmarking frameworks available in the literature. Based on the conducted comparison, the findings show that although the benchmarking frameworks are serving their intended purposes, they still suffer from some limitations. The paper also explains how the new framework overcomes these limitations.}, isbn = {978-1-5090-0220-7}, author = {Fath-Allah, Abdoullah and Cheikhi, Laila and Al-Qutaish, Rafa E. and Idri, Ali} } @conference {Belhiah2015189, title = {Towards a context-aware framework for assessing and optimizing Data Quality projects}, booktitle = {DATA 2015 - 4th International Conference on Data Management Technologies and Applications, Proceedings}, year = {2015}, note = {cited By 0}, pages = {189-194}, abstract = {This paper presents an approach to clearly identify the opportunities for increased monetary and non-monetary benefits from improved Data Quality, within an Enterprise Architecture context. The aim is to measure, in a quantitative manner, how key business processes help to execute an organization{\textquoteright}s strategy, and then to qualify the benefits as well as the complexity of improving data, that are consumed and produced by these processes. These findings will allow to clearly identify data quality improvement projects, based on the latter{\textquoteright}s benefits to the organization and their costs of implementation. To facilitate the understanding of this approach, a Java EE Web application is developed and presented here.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964994813\&partnerID=40\&md5=64fb574b1d5d15cd346582c7a58cdd20}, author = {Belhiah, M. and Benqatla, M.S. and Bounabat, B. and Achchab, S.} } @conference {Kriouile2015674, title = {Towards a high configurable SaaS: To deploy and bind auser-aware tenancy of the SaaS}, booktitle = {ICEIS 2015 - 17th International Conference on Enterprise Information Systems, Proceedings}, volume = {2}, year = {2015}, note = {cited By 0}, pages = {674-679}, abstract = {User-aware tenancy approach integrates the flexibility of the Rich-Variant Component with the high configurability of multi-tenant applications. Multi-tenancy is the notion of sharing instances among a large group of customers, called tenants. 
Multi-tenancy is a key enabler to exploit economies of scale for Software as a Service (SaaS) approaches. However, the ability of a SaaS application to be adapted to individual tenant{\textquoteright}s needs seem to be a major requirement. Thus, our approach proposes a more flexible and reusable SaaS system for Multi-tenant SaaS application using Rich-Variant Components. The approach consists in a user-aware tenancy for SaaS environments. In this paper, an algorithm is established to derive the necessary instances of Rich-Variant Components building the application and access to them in a scalable and performing manner. The algorithm is based on fundamental concepts from the graph theory. Copyright {\textcopyright} 2015 SCITEPRESS - Science and Technology Publications.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84939531130\&partnerID=40\&md5=0bb15c00d81573f066c0bc43f469a26f}, author = {Kriouile, H. and Mcharfi, Z. and El Asri, B.} } @article {Kartit201537403, title = {Towards a secure electronic voting in cloud computing environment using homomorphic encryption algorithm}, journal = {International Journal of Applied Engineering Research}, volume = {10}, number = {16}, year = {2015}, note = {cited By 0}, pages = {37403-37408}, abstract = {Cloud computing is the most envisioned paradigm shift in the computing world. In this context, it becomes necessary to properly protect the data from different risks and dangers that are born with cloud computing. In recent years, Storage in Cloud gained popularity among both companies and private users. However, data privacy, security, reliability and interoperability issues still have to be adequately faced and solved. But the most important between them is security and how cloud provider assures it. In this paper we present an electronic voting system based on Homomorphic encryption. Our proposal offers all the advantages of the additively homomorphic encryption scheme. {\textcopyright} Research India Publications.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84942041969\&partnerID=40\&md5=d7c7f2b6010ffb0011789f148b1fb2ed}, author = {Kartit, Z.a and El Marraki, M.a and Azougaghe, A.b and Belkasmi, M.b} } @conference { ISI:000380407100058, title = {Towards Deployment of a User-Aware Tenancy Using Rich-Variant Components for SaaS Applications}, booktitle = {2015 INTERNATIONAL CONFERENCE ON CLOUD TECHNOLOGIES AND APPLICATIONS (CLOUDTECH 15)}, year = {2015}, note = {International Conference on Cloud Computing Technologies and Applications (CloudTech), Marrakech, MOROCCO, JUN 02-04, 2015}, pages = {50-55}, abstract = {The approach of User-Aware Tenancy integrates the high configurability of multi-tenant applications with the flexibility and the functional variability of Rich-Variant Component use. Multi-tenancy concept consists in sharing instances among a large group of customers, called tenants. Multi-tenancy is a tool to exploit economies of scale widely promoted by Software as a Service (SaaS) models. However, the ability of a SaaS application to be adapted to individual tenant{\textquoteright}s needs seem to be a major requirement. Thus, our approach focuses on more flexibility and more reusability for Multi-tenant SaaS application using the multiview notion of Rich-Variant Components. The approach consists in a user-aware tenancy for SaaS. In this paper, we provide an application of an algorithm deriving the necessary instances of Rich-Variant Components building the application in a scalable and performing manner. 
The algorithm is based on fundamental concepts from graph theory, and is accompanied by a reduced school management application as an illustrating example.}, isbn = {978-1-4673-8149-9}, author = {Kriouile, Houda and El Asri, Bouchra and El Haloui, M{\textquoteright}barek and Benali, Asmae} } @conference {Kriouile2015, title = {Towards deployment of a user-aware tenancy using rich-variant components for SaaS applications}, booktitle = {Proceedings of 2015 International Conference on Cloud Computing Technologies and Applications, CloudTech 2015}, year = {2015}, note = {cited By 0}, abstract = {The User-Aware Tenancy approach integrates the high configurability of multi-tenant applications with the flexibility and the functional variability of Rich-Variant Component use. The multi-tenancy concept consists of sharing instances among a large group of customers, called tenants. Multi-tenancy is a tool to exploit the economies of scale widely promoted by Software as a Service (SaaS) models. However, the ability of a SaaS application to be adapted to individual tenants{\textquoteright} needs seems to be a major requirement. Thus, our approach focuses on more flexibility and more reusability for multi-tenant SaaS applications using the multiview notion of Rich-Variant Components. The approach consists of a user-aware tenancy for SaaS. In this paper, we provide an application of an algorithm deriving the necessary instances of Rich-Variant Components building the application in a scalable and performing manner. The algorithm is based on fundamental concepts from graph theory, and is accompanied by a reduced school management application as an illustrating example. {\textcopyright} 2015 IEEE.}, doi = {10.1109/CloudTech.2015.7337019}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962798426\&doi=10.1109\%2fCloudTech.2015.7337019\&partnerID=40\&md5=1097c82a9d464b18b102f40d386e959a}, author = {Kriouile, H. and El Asri, B. and El Haloui, M. and Benali, A.} } @conference {Majda201529, title = {Using cloud SaaS to ensure interoperability and standardization in heterogeneous Cloud based environment}, booktitle = {Proceedings of the 2015 5th World Congress on Information and Communication Technologies, WICT 2015}, year = {2015}, note = {cited By 0}, pages = {29-34}, abstract = {In recent years, because of the evolution of Cloud applications and the attractive advantages given to consumers, many companies have outsourced their data and Information Technology (IT) systems to Cloud based technology. With the growth in the number of Cloud Service Providers, many enterprises and organizations can use and combine services from multiple providers. There are many Internet protocols for service access standards that are used to ensure interoperability between the user{\textquoteright}s browser and the Web server, such as Representational State Transfer (REST) and Simple Object Access Protocol (SOAP). Each service has its specific characteristics such as authentication and security requirements. Hence, Cloud providers attempt to lock customers into proprietary interfaces, so consumers can find themselves facing unexpected and inevitable vendor lock-in. Therefore, the focus of our paper is to propose a solution to this problem by using a Cloud intermediary between Cloud providers and consumers in heterogeneous Cloud environment interfaces. This is to achieve benefits such as standardization and interoperability, minimizing lock-in of Cloud providers. 
To reach such benefits, we propose a Cloud SaaS based on two interfaces and an internal component. These interfaces are used on the Cloud Consumer and Provider sides and therefore guarantee communication between Cloud Consumer and Provider using different Internet protocols for Web service access such as REST and SOAP. Our approach guarantees interoperability and standardization in heterogeneous Cloud based environments because, in this case, a Cloud Consumer or Provider using the REST interface can communicate easily with a Cloud Provider or Consumer using the SOAP interface. {\textcopyright} 2015 IEEE.}, doi = {10.1109/WICT.2015.7489640}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84979763191\&doi=10.1109\%2fWICT.2015.7489640\&partnerID=40\&md5=8ecb7e7a0281a3ec056175d9af0dba04}, author = {Majda, E. and Ahmed, E.} } @conference { ISI:000383221800006, title = {Using Cloud SaaS to ensure interoperability and standardization in heterogeneous Cloud based environment}, booktitle = {PROCEEDINGS OF THE 2015 5TH WORLD CONGRESS ON INFORMATION AND COMMUNICATION TECHNOLOGIES (WICT)}, year = {2015}, note = {5th World Congress on Information and Communication Technologies (WICT), Marrakesh, MOROCCO, DEC 14-16, 2015}, pages = {29-34}, publisher = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; lab Res Grp Intelligent Machines; MIR Labs}, organization = {IEEE; IEEE Morocco Sect; IEEE Morocco Sect; IEEE SMC Tech Comm Soft Comp; Lab Informatique, Reseaux Mobilite Modelisation; lab Res Grp Intelligent Machines; MIR Labs}, abstract = {In recent years, because of the evolution of Cloud applications and the attractive advantages given to consumers, many companies have outsourced their data and Information Technology (IT) systems to Cloud based technology. With the growth in the number of Cloud Service Providers, many enterprises and organizations can use and combine services from multiple providers. There are many Internet protocols for service access standards that are used to ensure interoperability between the user{\textquoteright}s browser and the Web server, such as Representational State Transfer (REST) and Simple Object Access Protocol (SOAP). Each service has its specific characteristics such as authentication and security requirements. Hence, Cloud providers attempt to lock customers into proprietary interfaces, so consumers can find themselves facing unexpected and inevitable vendor lock-in. Therefore, the focus of our paper is to propose a solution to this problem by using a Cloud intermediary between Cloud providers and consumers in heterogeneous Cloud environment interfaces. This is to achieve benefits such as standardization and interoperability, minimizing lock-in of Cloud providers. To reach such benefits, we propose a Cloud SaaS based on two interfaces and an internal component. These interfaces are used on the Cloud Consumer and Provider sides and therefore guarantee communication between Cloud Consumer and Provider using different Internet protocols for Web service access such as REST and SOAP. 
Our approach guarantees interoperability and standardization in heterogeneous Cloud based environments because in this case Cloud Consumer and Provider using REST interface can communicate easily with Cloud Provider and Consumer using SOAP interface.}, isbn = {978-1-4673-8712-5}, author = {Majda, Elhozmari and Ahmed, Ettalbi}, editor = {Abrahim, A and Alimi, AM and Haqiq, A and Karray, H and Mousannif, H and BenHalima, M and Choo, YH and Ma, K} } @article {Benamar2015100, title = {Visual contact with catadioptric cameras}, journal = {Robotics and Autonomous Systems}, volume = {64}, year = {2015}, note = {cited By 0}, pages = {100-119}, abstract = {Time to contact or time to collision (TTC) is utmost important information for animals as well as for mobile robots because it enables them to avoid obstacles; it is a convenient way to analyze the surrounding environment. The problem of TTC estimation is largely discussed in perspective images. Although a lot of works have shown the interest of omnidirectional camera for robotic applications such as localization, motion, monitoring, few works use omnidirectional images to compute the TTC. In this paper, we show that TTC can be also estimated on catadioptric images. We present two approaches for TTC estimation using directly or indirectly the optical flow based on de-rotation strategy. The first, called "gradient based TTC", is simple, fast and it does not need an explicit estimation of the optical flow. Nevertheless, this method cannot provide a TTC on each pixel, valid only for para-catadioptric sensors and requires an initial segmentation of the obstacle. The second method, called "TTC map estimation based on optical flow", estimates TTC on each point on the image and provides the depth map of the environment for any obstacle in any direction and is valid for all central catadioptric sensors. Some results and comparisons in synthetic and real images will be given. {\textcopyright} 2014 Elsevier B.V. All rights reserved.}, doi = {10.1016/j.robot.2014.09.036}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84926140754\&doi=10.1016\%2fj.robot.2014.09.036\&partnerID=40\&md5=30948376e9b031a913a35e2c865a9513}, author = {Benamar, F.a b and Elfkihi, S.a c and Demonceaux, C.d and Mouaddib, E.b and Aboutajdine, D.a} } @article { ISI:000348881600008, title = {Visual contact with catadioptric cameras}, journal = {ROBOTICS AND AUTONOMOUS SYSTEMS}, volume = {64}, year = {2015}, month = {FEB}, pages = {100-119}, abstract = {Time to contact or time to collision (TTC) is utmost important information for animals as well as for mobile robots because it enables them to avoid obstacles; it is a convenient way to analyze the surrounding environment. The problem of TTC estimation is largely discussed in perspective images. Although a lot of works have shown the interest of omnidirectional camera for robotic applications such as localization, motion, monitoring, few works use omnidirectional images to compute the TTC. In this paper, we show that TTC can be also estimated on catadioptric images. We present two approaches for TTC estimation using directly or indirectly the optical flow based on de-rotation strategy. The first, called {\textquoteleft}{\textquoteleft}gradient based TTC{{\textquoteright}{\textquoteright}}, is simple, fast and it does not need an explicit estimation of the optical flow. Nevertheless, this method cannot provide a TTC on each pixel, valid only for para-catadioptric sensors and requires an initial segmentation of the obstacle. 
The second method, called {\textquoteleft}{\textquoteleft}TTC map estimation based on optical flow{{\textquoteright}{\textquoteright}}, estimates TTC on each point on the image and provides the depth map of the environment for any obstacle in any direction and is valid for all central catadioptric sensors. Some results and comparisons in synthetic and real images will be given. (C) 2014 Elsevier B.V. All rights reserved.}, issn = {0921-8890}, doi = {10.1016/j.robot.2014.09.036}, author = {Benamar, F. and Elfkihi, S. and Demonceaux, C. and Mouaddib, E. and Aboutajdine, D.} } @conference {Amazal2014252, title = {An analogy-based approach to estimation of software development effort using categorical data}, booktitle = {Proceedings - 2014 Joint Conference of the International Workshop on Software Measurement, IWSM 2014 and the International Conference on Software Process and Product Measurement, Mensura 2014}, year = {2014}, note = {cited By 2}, pages = {252-262}, abstract = {Analogy-based software development effort estimation methods have proved to be a viable alternative to other conventional estimation methods since they mimic the human problem solving approach. However, they are limited by their inability to correctly handle categorical data. Therefore, we have proposed, in an earlier work, a new approach called fuzzy analogy which extends classical analogy by incorporating the fuzzy logic concept in the estimation process. The proposed approach may be applied only when the categorical values are derived from numerical data. This paper extends fuzzy analogy to deal with categorical values that are not derived from numerical data. To this aim, we used the fuzzy k-modes algorithm, a well-known clustering technique for large datasets containing categorical values. Thereafter, we evaluate the accuracy of fuzzy analogy construction-based on fuzzy k-modes using the ISBSG R8 dataset. This evaluation shows that our proposed approach leads to significant improvement in estimation accuracy. {\textcopyright} 2014 IEEE.}, doi = {10.1109/IWSM.Mensura.2014.31}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929643263\&doi=10.1109\%2fIWSM.Mensura.2014.31\&partnerID=40\&md5=84eb80725af9b61fcbeeb71e6ba04eb9}, author = {Amazal, F.-A.a and Idri, A.a and Abran, A.b} } @article {Idri2014, title = {Analogy-based software development effort estimation: A systematic mapping and review}, journal = {Information and Software Technology}, year = {2014}, note = {cited By 1; Article in Press}, abstract = {Context: Analogy-based software development effort estimation (ASEE) techniques have gained considerable attention from the software engineering community. However, to our knowledge, no systematic mapping has been created of ASEE studies and no review has been carried out to analyze the empirical evidence on the performance of ASEE techniques. Objective: The objective of this research is twofold: (1) to classify ASEE papers according to five criteria: research approach, contribution type, techniques used in combination with ASEE methods, and ASEE steps, as well as identifying publication channels and trends; and (2) to analyze these studies from five perspectives: estimation accuracy, accuracy comparison, estimation context, impact of the techniques used in combination with ASEE methods, and ASEE tools. Method: We performed a systematic mapping of ASEE studies published in the period 1990-2012, and reviewed them based on an automated search of four electronic databases. 
Results: In total, we identified 65 studies published between 1990 and 2012, and classified them based on our predefined classification criteria. The mapping study revealed that most researchers focus on addressing problems related to the first step of an ASEE process, that is, feature and case subset selection. The results of our detailed analysis show that ASEE methods outperform the eight techniques with which they were compared, and tend to yield acceptable results especially when combining ASEE techniques with fuzzy logic (FL) or genetic algorithms (GA). Conclusion: Based on the findings of this study, the use of other techniques such FL and GA in combination with an ASEE method is promising to generate more accurate estimates. However, the use of ASEE techniques by practitioners is still limited: developing more ASEE tools may facilitate the application of these techniques and then lead to increasing the use of ASEE techniques in industry. {\textcopyright} 2014 Elsevier B.V. All rights reserved.}, doi = {10.1016/j.infsof.2014.07.013}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906037417\&doi=10.1016\%2fj.infsof.2014.07.013\&partnerID=40\&md5=4a5eda69bfe8b11111bed58fad572629}, author = {Idri, A.a and Amazal, F.A.a and Abran, A.b} } @conference { ISI:000371484600034, title = {Application of Multi-Agent Markov Decision Processes to Gate Assignment Problem}, booktitle = {2014 THIRD IEEE INTERNATIONAL COLLOQUIUM IN INFORMATION SCIENCE AND TECHNOLOGY (CIST{\textquoteright}14)}, series = {Colloquium in Information Science and Technology}, year = {2014}, note = {3rd IEEE International Colloquium on Information Science and Technology (CIST), Tetouan, MOROCCO, OCT 20-22, 2014}, pages = {196-201}, publisher = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, organization = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, abstract = {Gate assignment Problem (GAP) is an important subject of airport management to ensure smooth traffic operations. However, flights schedule may undergo some stochastic events such as delays that usually occur and have to be considered in the planning. Our approach considers the representation of gates as collaborative agents trying to complete a set of flights assignment tasks as given by a centralized controller. That will allow giving a new model for the GAP based on Multi Agent Markov Decision Processes (MMDP). The aim of this work is to give to controllers at the airport a robust priory solution instead of taking the risk of online schedule modifications to handle uncertainty. 
The solution of this problem will be a set of optimal decisions to be taken in every case of traffic disturbance.}, isbn = {978-1-4799-5979-2}, issn = {2327-185X}, author = {Aoun, Oussama and El Afia, Abdellatif}, editor = {ElMohajir, M and AlAchhab, M and Chahhou, M} } @conference { ISI:000366999600191, title = {Applications of Multi-Agent Systems in Smart Grids: A Survey}, booktitle = {2014 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2014}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Marrakech, MOROCCO, APR 14-16, 2014}, pages = {1088-1094}, abstract = {The Smart Grids (SGs) are regarded as the new generation of electric power systems, combining the development of Information Technology (IT), distributed systems and Artificial Intelligence (AI) for more features on the real-time monitoring of the Demand / Response (DR) and the energy consumption. An approach based on the use of Multi-Agent Systems (MAS) to study the management of distribution systems, simulating the characteristics of SG. This paper presents the different platforms used for the implementation of MAS for the control and operation of smart grids. The MAS{\textquoteright} applications in SG available in the literature are also developed in this paper.}, isbn = {978-1-4799-3824-7}, author = {Merabet, Ghezlane Halhoul and Essaaidi, Mohammed and Talei, Hanaa and Abid, Mohamed Riduan and Khalil, Nacer and Madkour, Mohcine and Benhaddou, Driss} } @conference {Merabet20141088, title = {Applications of multi-Agent systems in smart grids: A survey}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2014}, note = {cited By 1}, pages = {1088-1094}, abstract = {The Smart Grids (SGs) are regarded as the new generation of electric power systems, combining the development of Information Technology (IT), distributed systems and Artificial Intelligence (AI) for more features on the real-time monitoring of the Demand /Response (DR) and the energy consumption. An approach based on the use of Multi-Agent Systems (MAS) to study the management of distribution systems, simulating the characteristics of SG. This paper presents the different platforms used for the implementation of MAS for the control and operation of smart grids. The MAS{\textquoteright} applications in SG available in the literature are also developed in this paper. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICMCS.2014.6911384}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928741215\&doi=10.1109\%2fICMCS.2014.6911384\&partnerID=40\&md5=0552200ad62f47930e0a666d76aa763d}, author = {Merabet, G.H.a and Essaaidi, M.a and Talei, H.b and Abid, M.R.b and Khalil, N.c and Madkour, M.c and Benhaddou, D.c} } @conference {Amraoui2014326, title = {CCS: A Correct Cooperation Strategy based on game theory for MANETS}, booktitle = {Proceedings of IEEE/ACS International Conference on Computer Systems and Applications, AICCSA}, volume = {2014}, year = {2014}, note = {cited By 1}, pages = {326-332}, abstract = {Mobile ad hoc networks (MANETs) is a multihop wireless communication network supporting mobile users (nodes) without any existing infrastructure and depend of the individual behavior of the nodes. This nature of MANETs imposes the cooperation among nodes to improve quality of service and security especially that mobility and energy are the most important criteria in MANETs. 
In such networks, devices can have different configurations and should cooperate to ensure the existence of such networks. Cooperation enforcement mechanisms do not provide strong authentication of entities. However, cooperation may be more difficult to apply in MANETs than in infrastructure-based networks, for the many reasons already mentioned. In this paper we{\textquoteright}ll present a correct strategy based on the cooperation rate (CR) to enforce cooperation and communication between nodes in a MANET using the OLSR routing protocol. The concept of this game-theory-based strategy is that each node keeps track of the other nodes and calculates their CR. The CR is calculated based on various types of OLSR messages (HELLO, TC, MID and HNA) sent among nodes, and also based on different network processing (forwarding and routing). {\textcopyright} 2014 IEEE.}, doi = {10.1109/AICCSA.2014.7073216}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988269680\&doi=10.1109\%2fAICCSA.2014.7073216\&partnerID=40\&md5=bc6ef75e71388636784417f4f74c2910}, author = {Amraoui, H.a and Habbani, A.a b and Hajami, A.c} } @article {Cheikhi2014359, title = {Chidamber and Kemerer object-oriented measures: Analysis of their design from the metrology perspective}, journal = {International Journal of Software Engineering and its Applications}, volume = {8}, number = {2}, year = {2014}, note = {cited By 2}, pages = {359-374}, abstract = {During the last decade, the software product measurement field has seen many improvements and has become an emerging field of software engineering. Based on the programming approaches used, such as object-oriented and structured programming, different kinds of measures have been proposed in the literature. These sets of measures were defined many years ago to measure software artifacts, for example, source code and design. However, such measures need to be verified and validated based on the lessons learned from measurement and metrology concepts. This paper focuses on software product measures, in particular the object-oriented measures. Moreover, it aims at analyzing the design of one of the best-known and most widely used sets of object-oriented measures, namely the Chidamber and Kemerer measures suite. In addition, this paper investigates the extent to which this set of measures addresses the metrology concepts related to software measurement design.
{\textcopyright} 2014 SERSC.}, doi = {10.14257/ijseia.2014.8.2.35}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84896897162\&doi=10.14257\%2fijseia.2014.8.2.35\&partnerID=40\&md5=d21c99cfd00552aeb2bdae4c13dac40c}, author = {Cheikhi, L.a and Al-Qutaish, R.E.b and Idri, A.a and Sellami, A.c} } @conference { ISI:000380387700114, title = {A Co-evolution Model for Software Product Lines: an Approach Based on Evolutionary Trees}, booktitle = {2014 SECOND WORLD CONFERENCE ON COMPLEX SYSTEMS (WCCS)}, year = {2014}, note = {2014 Second World Conference on Complex Systems (WCCS), Agadir, MOROCCO, NOV 10-12, 2014}, pages = {140-145}, publisher = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, organization = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, abstract = {Software product line engineering allows for the development of common reference assets that can be reused in the development of a set of products of a particular market, the purpose of such large scale reuse is the improvement of time to market and the reduction of costs. This goal can only be achieved if the software product line assets are continuously evolved to cope with changes of user requirements. In this paper we present a co-evolution model for checking the integrity of the software product line during its evolution. The model is established using cladistics and trees reconciliation that are used in biology to analyze the co-evolution between organisms. This model shows the divergent co-evolution which represents a difference between the evolutionary trees of the software product line and a derived product. The model helps to find out changes of products that were not propagated to the reference assets and to anticipate the future development to be done on these reference assets to cope with products evolution. By its application we protect software product lines from the aging phenomenon.}, isbn = {978-1-4799-4647-1}, author = {Benlarabi, Anissa and El Asri, Bouchra and Khtira, Amal} } @conference {Benlarabi2014140, title = {A co-evolution model for software product lines: An approach based on evolutionary trees}, booktitle = {2014 2nd World Conference on Complex Systems, WCCS 2014}, year = {2014}, note = {cited By 3}, pages = {140-145}, abstract = {Software product line engineering allows for the development of common reference assets that can be reused in the development of a set of products of a particular market, the purpose of such large scale reuse is the improvement of time to market and the reduction of costs. This goal can only be achieved if the software product line assets are continuously evolved to cope with changes of user requirements. In this paper we present a co-evolution model for checking the integrity of the software product line during its evolution. The model is established using cladistics and trees reconciliation that are used in biology to analyze the co-evolution between organisms. This model shows the divergent co-evolution which represents a difference between the evolutionary trees of the software product line and a derived product. The model helps to find out changes of products that were not propagated to the reference assets and to anticipate the future development to be done on these reference assets to cope with products evolution. By its application we protect software product lines from the aging phenomenon. 
{\textcopyright} 2014 IEEE.}, doi = {10.1109/ICoCS.2014.7060991}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929164049\&doi=10.1109\%2fICoCS.2014.7060991\&partnerID=40\&md5=8647fe4ff84bea2b02dd50b90e0d9bb9}, author = {Benlarabi, A. and El Asri, B. and Khtira, A.} } @conference {Nejja2014153, title = {Correction of the Arabic derived words using surface patterns}, booktitle = {WCCCS 2014 - Proceedings; 2014 5th Workshop on Codes, Cryptography and Communication Systems}, year = {2014}, note = {cited By 0}, pages = {153-156}, abstract = {The automatic spell checker tool is one of the most efficient tools of automatic language processing systems. Its performance varies according to the correction mechanisms implemented. {\textcopyright} 2014 IEEE.}, doi = {10.1109/WCCCS.2014.7107931}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84983168886\&doi=10.1109\%2fWCCCS.2014.7107931\&partnerID=40\&md5=7ba468e3d13cda8061feb344c50052ef}, author = {Nejja, M.a and Abdellah, Y.b} } @conference { ISI:000361554402054, title = {Development of a Calculator for Edge and Parallel Coupled Microstrip Band Pass Filters}, booktitle = {2014 IEEE ANTENNAS AND PROPAGATION SOCIETY INTERNATIONAL SYMPOSIUM (APSURSI)}, series = {IEEE Antennas and Propagation Society International Symposium}, year = {2014}, note = {IEEE Antennas-and-Propagation-Society International Symposium (APSURSI), Memphis, TN, JUL 06-11, 2014}, pages = {2018-2019}, publisher = {Inst Elect Elect Engineers; Inst Elect Elect Engineers Antennas \& Propagat Soc}, organization = {Inst Elect Elect Engineers; Inst Elect Elect Engineers Antennas \& Propagat Soc}, abstract = {This paper presents an implemented calculator tool for the design of Edge/Parallel Coupled Microstrip Band Pass Filters (PCMBPF) that makes use of the MA TLAB software. This calculator allows estimating both the parameters required for the design of the PCMBPF and the electrical response which is obtained by means of the equivalent circuit of this type of filters. Based on the transmission line theory approach (TLTA), the calculator herein proposed is a good solution to simply obtain the design parameters of this type of filters given that all formulas required for the PCMBPF design are programmed using close-form mathematic expressions and the coupling matrix concept. In order to validate our calculator performance, we implemented the proposed filter in a commercial electromagnetic simulator CST MWs that considers a set of electromagnetic effects, and accurately determine the final filter design. Secondly, we compared these simulation outcomes with the measurement results, achieving a reasonable agreement.}, isbn = {978-1-4799-3540-6}, issn = {1522-3965}, author = {Naghar, Azzeddin and Aghzout, Otman and Vazquez Alejos, Ana and Garcia Sanchez, Manuel and Essaaidi, Mohamed} } @conference {Naghar20142018, title = {Development of a calculator for Edge and Parallel Coupled Microstrip band pass filters}, booktitle = {IEEE Antennas and Propagation Society, AP-S International Symposium (Digest)}, year = {2014}, note = {cited By 3}, pages = {2018-2019}, abstract = {This paper presents an implemented calculator tool for the design of Edge/ Parallel Coupled Microstrip Band Pass Filters (PCMBPF) that makes use of the MATLAB software. This calculator allows estimating both the parameters required for the design of the PCMBPF and the electrical response which is obtained by means of the equivalent circuit of this type of filters. 
Based on the transmission line theory approach (TLTA), the calculator proposed herein is a good solution for simply obtaining the design parameters of this type of filter, given that all formulas required for the PCMBPF design are programmed using closed-form mathematical expressions and the coupling matrix concept. In order to validate the calculator{\textquoteright}s performance, we implemented the proposed filter in the commercial electromagnetic simulator CST MWS, which considers a set of electromagnetic effects and accurately determines the final filter design. Secondly, we compared these simulation outcomes with the measurement results, achieving a reasonable agreement. {\textcopyright} 2014 IEEE.}, doi = {10.1109/APS.2014.6905336}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84907870530\&doi=10.1109\%2fAPS.2014.6905336\&partnerID=40\&md5=bc3cd87613500ca710b9f58fdca9f27d}, author = {Naghar, A.a and Aghzout, O.a and Vazquez Alejos, A.b and Garcia Sanchez, M.b and Essaaidi, M.c} } @conference { ISI:000358381600012, title = {Effect of selfish behaviour on OLSR and AODV Routing Protocols in MANETs}, booktitle = {2014 GLOBAL SUMMIT ON COMPUTER \& INFORMATION TECHNOLOGY (GSCIT)}, year = {2014}, note = {Global Summit on Computer and Information Technology (GSCIT), Sousse, TUNISIA, JUN 14-16, 2014}, publisher = {IEEE Tunisia Sect; IEEE; Future Technologies \& Innovat}, organization = {IEEE Tunisia Sect; IEEE; Future Technologies \& Innovat}, abstract = {In this paper, we{\textquoteright}ll discuss the cooperation of nodes, which is one of the important factors in improving the quality of service (QoS) and security in mobile ad hoc networks (MANETs). The routing protocols for MANETs are designed based on the assumption that all participating nodes are fully cooperative. In such networks, selfish behaviour is among the big challenges and can lead to the degradation of network performance. We{\textquoteright}ll introduce this behaviour and propose a new profile to assess its impact on the performance of MANETs: energy, end-to-end delay, routing packets and the packet rate. The results of simulations using the OLSR and AODV protocols have shown that energy-based selfishness is a big problem and could affect performance in MANETs depending on the mobility of nodes and the size and density of the network. We{\textquoteright}ll try to prove that selfish behaviour can be used to impact quality of service (QoS) and security in MANETs.
Therefore, we need a comprehensive mechanism to cope with this behaviour to isolate selfishness nodes and we have planned to publish it in the future work as a new strategy based on game theory to encourage nodes to cooperate.}, isbn = {978-1-4799-5627-2}, author = {Amraoui, Hicham and Habbani, Ahmed and Hajami, Abdelmajid} } @conference { ISI:000371484600022, title = {Efficient Vertical handover scheme on IMS network and cost analysis}, booktitle = {2014 THIRD IEEE INTERNATIONAL COLLOQUIUM IN INFORMATION SCIENCE AND TECHNOLOGY (CIST{\textquoteright}14)}, series = {Colloquium in Information Science and Technology}, year = {2014}, note = {3rd IEEE International Colloquium on Information Science and Technology (CIST), Tetouan, MOROCCO, OCT 20-22, 2014}, pages = {129-134}, publisher = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, organization = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, abstract = {According to the various mobility, the resource allocation, interference, and the characteristics of heterogeneity and ubiquity of access network based on the next generation network, namely, the network IMS (IP Multimedia subsystem), have imposed several challenges. In regard to mobility management, handover management, and Quality of Service (QoS) provisioning, this paper proposes an adaptive environment based on IMS, between the heterogeneous access technologies, like WiMAX, WLAN, UMTS and LTE/LTE Advanced interfaces. Nevertheless, especially, the problem of handover decision, as resolving it influence, mainly on the handover performance; ANN(Artificial Neural Network) is proposed to solve the problem of decision and selection of best network access based IMS. The objective of proposed approach algorithms, is to maximize handover performance and reliability, with a new generic mobility management architecture, for system resource utilization by reducing handover latency and achieving load balance between different heterogeneous cells and networks}, isbn = {978-1-4799-5979-2}, issn = {2327-185X}, author = {Allouch, Hamid and Belkasmi, Mostafa}, editor = {ElMohajir, M and AlAchhab, M and Chahhou, M} } @article {Fath-Allah2014101, title = {E-government portals best practices: A comprehensive survey}, journal = {Electronic Government}, volume = {11}, number = {1-2}, year = {2014}, note = {cited By 5}, pages = {101-132}, abstract = {An e-government portal is a website that is offering various useful electronic services to the citizens. With the traditional government portals (offices), the services provided to the citizens need a lot of paperwork and many officers are required to conduct such services, and also the citizens need to be present personally, which means they have to leave their jobs for many hours. Thus, making these services as electronic ones through the web will result in great savings for governmental entities and citizens. This paper exploring the e-government portal{\textquoteright}s best practices collected from research and industry. Although these best practices exist in the literature, they are not classified or grouped in a logical way. In addition, this paper aims to provide a structured overview of these best practices according to three categories of best practices (i.e., back-end, front-end, and external). 
Copyright {\textcopyright} 2014 Inderscience Enterprises Ltd.}, doi = {10.1504/EG.2014.063316}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84904336187\&doi=10.1504\%2fEG.2014.063316\&partnerID=40\&md5=3bbc18ed114bf20b4e24d4c7c8ad0b91}, author = {Fath-Allah, A.a and Cheikhi, L.a and Al-Qutaish, R.E.b and Idri, A.a} } @conference { ISI:000371484600023, title = {Elasticity and Scalability Centric Quality Model for the Cloud}, booktitle = {2014 THIRD IEEE INTERNATIONAL COLLOQUIUM IN INFORMATION SCIENCE AND TECHNOLOGY (CIST{\textquoteright}14)}, series = {Colloquium in Information Science and Technology}, year = {2014}, note = {3rd IEEE International Colloquium on Information Science and Technology (CIST), Tetouan, MOROCCO, OCT 20-22, 2014}, pages = {135-140}, publisher = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, organization = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, abstract = {Cloud computing seems to be the most logical shift in terms of Information Technology after Internet, Social Networking... Despite the potential benefits that cloud computing offers, the model brings new issues, challenges, and needs in terms of SLA formalization and Quality of Service (QoS) evaluation, due to the heterogeneous resources and to the special features it implies, such as Elasticity and Scalability. In the scope of this paper, we focus on the Elasticity and Scalability attributes to assess their impact on the QoS. The paper provides a multi-lens overview that can help both cloud consumers and potential business application owners to understand, analyze, and evaluate important aspects related to Scalability and Elasticity capabilities. We determine and analyze the key features of these characteristics and derive metrics that evaluate the cloud elasticity-centric capabilities. We present a specific quality model for those two characteristics derived from their sub-attributes.}, isbn = {978-1-4799-5979-2}, issn = {2327-185X}, author = {Alfath, Abdeladim and Baina, Salah and Baina, Karim}, editor = {ElMohajir, M and AlAchhab, M and Chahhou, M} } @article { ISI:000339687600009, title = {Estimation of the time-to-collision (TTC) map based on optical flow in para-catadioptric vision}, journal = {TRAITEMENT DU SIGNAL}, volume = {31}, number = {1-2}, year = {2014}, month = {JAN-JUN}, pages = {197-219}, issn = {0765-0019}, author = {Benamar, Fatima Zahra and El Fkihi, Sanaa and Demonceaux, Cedric and Mouaddib, El Mustapha and Aboutajdine, Driss} } @conference {Ouhbi2014141, title = {Evaluating software product quality: A systematic mapping study}, booktitle = {Proceedings - 2014 Joint Conference of the International Workshop on Software Measurement, IWSM 2014 and the International Conference on Software Process and Product Measurement, Mensura 2014}, year = {2014}, note = {cited By 5}, pages = {141-151}, abstract = {Evaluating software product quality (SPQ) is an important task to ensure the quality of software products.
In this paper a systematic mapping study was performed to summarize the existing SPQ evaluation (SPQE) approaches in literature and to classify the selected studies according to seven classification criteria: SPQE approaches, research types, empirical types, data sets used in the empirical evaluation of these studies, artifacts, SQ models, and SQ characteristics. Publication channels and trends were also identified. 57 papers were selected. The results show that the main publication sources of the papers identified were journals. Data mining techniques are the most frequently approaches reported in literature. Solution proposals were the main research type identified. The majority of the selected papers were history-based evaluations using existing data, which were mainly obtained from open source software projects and domain specific projects. Source code was the main artifacts used by SPQE approaches. Well-known SQ models were mentioned by half of the selected papers and reliability is the SQ characteristic through which SPQE was mainly achieved. SPQE-related subjects seem to attract more interest from researchers since the past years. {\textcopyright} 2014 IEEE.}, doi = {10.1109/IWSM.Mensura.2014.30}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84924054129\&doi=10.1109\%2fIWSM.Mensura.2014.30\&partnerID=40\&md5=dde0c90f7da7b7c14764aae521798235}, author = {Ouhbi, S.a and Idri, A.a and Aleman, J.L.F.b and Toval, A.b} } @conference { ISI:000346582400023, title = {Evolutionary algorithm for a Green vehicle routing problem with multiple trips}, booktitle = {PROCEEDINGS OF 2014 2ND IEEE INTERNATIONAL CONFERENCE ON LOGISTICS AND OPERATIONS MANAGEMENT (GOL 2014)}, year = {2014}, note = {2nd IEEE International Conference on Logistics Operations Management (GOL), Rabat, MOROCCO, JUN 05-07, 2014}, pages = {148+}, publisher = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, organization = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, abstract = {This paper deals with a variant of vehicle routing problem where vehicles are allowed to take more than one route during the working day. The depreciation of the vehicle may be a bad investment for green transportation because it could generate more emissions. Hence, it is necessary to satisfy green transportation requirements by reducing the CO2 emissions from road transportation. The objective is to optimize the amount of greenhouse gas emissions. A restricted fleet size is used to serve demands, so the vehicles could exceed the time horizon. It is subject also to minimize the maximum overtime to find feasible solutions. A mathematical model has been proposed for the Green Vehicle Routing Problem with multiple trips (GVRPM). An evolutionary algorithm has been developed to solve it by combining a genetic algorithm with a local search procedure. The effectiveness of our approach is tested on a set of benchmarks. 
Comparing with existing algorithm, our approach shows competitive performance and contributes many new best solutions.}, isbn = {978-1-4799-4650-1}, author = {Ayadi, Rajaa and ElIdrissi, Adiba ElBouzekri and Benadada, Youssef and Alaoui, Ahmed El Hilali}, editor = {Benadada, Y} } @conference {Ayadi2014148, title = {Evolutionary algorithm for a Green vehicle routing problem with multiple trips}, booktitle = {Proceedings of 2nd IEEE International Conference on Logistics Operations Management, GOL 2014}, year = {2014}, note = {cited By 0}, pages = {148-154}, abstract = {This paper deals with a variant of vehicle routing problem where vehicles are allowed to take more than one route during the working day. The depreciation of the vehicle may be a bad investment for green transportation because it could generate more emissions. Hence, it is necessary to satisfy green transportation requirements by reducing the CO2 emissions from road transportation. The objective is to optimize the amount of greenhouse gas emissions. A restricted fleet size is used to serve demands, so the vehicles could exceed the time horizon. It is subject also to minimize the maximum overtime to find feasible solutions. A mathematical model has been proposed for the Green Vehicle Routing Problem with multiple trips (GVRPM). An evolutionary algorithm has been developed to solve it by combining a genetic algorithm with a local search procedure. The effectiveness of our approach is tested on a set of benchmarks. Comparing with existing algorithm, our approach shows competitive performance and contributes many new best solutions. {\textcopyright} 2014 IEEE.}, doi = {10.1109/GOL.2014.6887432}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908638694\&doi=10.1109\%2fGOL.2014.6887432\&partnerID=40\&md5=0d81bed3c4e8a5f48a72694e68674134}, author = {Ayadi, R.a and Elidrissi, A.E.b and Benadada, Y.a and El Hilali Alaoui, A.a} } @conference {Sarhani201479, title = {An extension of X13-ARIMA-SEATS to forecast islamic holidays effect on logistic activities}, booktitle = {Proceedings of 2nd IEEE International Conference on Logistics Operations Management, GOL 2014}, year = {2014}, note = {cited By 0}, pages = {79-84}, abstract = {To better manage and optimize logistic activities, factors that affect it must be determined: The calendar effect is one of these factors which must be analyzed. Analyzing such kind of data by using classical time series forecasting methods, such as exponential smoothing method and ARIMA model, will fail to capture such variation. This paper is released to present a review of the models which are used to forecast the calendar effect, especially moving holidays effect. We adopt the recent approach of X13-ARIMA-SEATS and extend it for being able to forecast the effect of Islamic holidays. Our extension is applied to Moroccan case studies, and aims to give recommendations concerning this effect on logistic activities. {\textcopyright} 2014 IEEE.}, doi = {10.1109/GOL.2014.6887423}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908655728\&doi=10.1109\%2fGOL.2014.6887423\&partnerID=40\&md5=de88391f3f7ec0517e753c7aeb4aeae4}, author = {Sarhani, M. 
and El Afia, A.} } @conference { ISI:000346582400013, title = {An Extension of X13-ARIMA-SEATS to Forecast Islamic Holidays Effect on Logistic Activities}, booktitle = {PROCEEDINGS OF 2014 2ND IEEE INTERNATIONAL CONFERENCE ON LOGISTICS AND OPERATIONS MANAGEMENT (GOL 2014)}, year = {2014}, note = {2nd IEEE International Conference on Logistics Operations Management (GOL), Rabat, MOROCCO, JUN 05-07, 2014}, pages = {79-84}, publisher = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, organization = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, abstract = {To better manage and optimize logistic activities, factors that affect it must be determined: The calendar effect is one of these factors which must be analyzed. Analyzing such kind of data by using classical time series forecasting methods, such as exponential smoothing method and ARIMA model, will fail to capture such variation. This paper is released to present a review of the models which are used to forecast the calendar effect, especially moving holidays effect. We adopt the recent approach of X13-ARIMA-SEATS and extend it for being able to forecast the effect of Islamic holidays. Our extension is applied to Moroccan case studies, and aims to give recommendations concerning this effect on logistic activities.}, isbn = {978-1-4799-4650-1}, author = {Sarhani, Malek and El Afia, Abdellatif}, editor = {Benadada, Y} } @conference {Koulali2014845, title = {Ferry-based architecture for Participatory Sensing}, booktitle = {IWCMC 2014 - 10th International Wireless Communications and Mobile Computing Conference}, year = {2014}, note = {cited By 0}, pages = {845-849}, abstract = {The concept of Participatory Sensing is centered on individuals that collect data using their smart phones (or other dedicated devices) to track the evolution of their work/living places. The main objective is to use the gathered data in order to enhance the offered quality of life. To maximize people involvement in this process we propose a ferry based architecture to leverage the contributors from charges associated with accessing service providers infrastructure to forward collected data. Opportunistic contact with message ferries will be exploited to gather data that will be carried from ferry-To-ferry till it reaches a centralized processing and decision-Taking authority. We provide a closed formula for the End-To-End throughput of the proposed gathering network architecture. 
{\textcopyright} 2014 IEEE.}, doi = {10.1109/IWCMC.2014.6906466}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908610867\&doi=10.1109\%2fIWCMC.2014.6906466\&partnerID=40\&md5=20f82b554230199456e13e347b4ecbe0}, author = {Koulali, S.a and Sabir, E.b and Kobbane, A.c and Azizi, M.a} } @conference { ISI:000361140000144, title = {Ferry-based Architecture for Participatory sensing}, booktitle = {2014 INTERNATIONAL WIRELESS COMMUNICATIONS AND MOBILE COMPUTING CONFERENCE (IWCMC)}, series = {International Wireless Communications and Mobile Computing Conference}, year = {2014}, note = {10th International Wireless Communications \& Mobile Computing Conference (IWCMC), Nicosia, CYPRUS, AUG 04-08, 2014}, pages = {845-849}, publisher = {IEEE; IEEE Cyprus Sect; Frederick Univ}, organization = {IEEE; IEEE Cyprus Sect; Frederick Univ}, abstract = {The concept of Participatory Sensing is centered on individuals that collect data using their smart phones (or other dedicated devices) to track the evolution of their work/living places. The main objective is to use the gathered data in order to enhance the offered quality of life. To maximize people involvement in this process we propose a ferry based architecture to leverage the contributors from charges associated with accessing service providers infrastructure to forward collected data. Opportunistic contact with message ferries will be exploited to gather data that will be carried from ferry-to-ferry till it reaches a centralized processing and decision-taking authority. We provide a closed formula for the End-to-End throughput of the proposed gathering network architecture.}, isbn = {978-1-4799-0959-9}, issn = {2376-6492}, author = {Koulali, Sara and Sabir, Essaid and Kobbane, Abdellatif and Azizi, Mostafa} } @conference { ISI:000366999600264, title = {FPGA Implementation of HIHO and SIHO Decoders for DSC Codes}, booktitle = {2014 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2014}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Marrakech, MOROCCO, APR 14-16, 2014}, pages = {1461-1464}, abstract = {This paper presents the study of two decoder architectures and their VHDL design and implementation on a FPGA circuit. Both decoders are designed for Difference Set Cyclic (DSC) codes. The first one is the Hard In - Hard Out decoder (HIHO), and the second one is the Massey{\textquoteright}s threshold Soft In - Hard Out (SIHO) decoder. The two architectures are analyzed and discussed for serial and parallel processing implementations. The results show that the complexity, measured by the number of Logical Elements (LE) which is directly proportional to the silicon area occupied by the decoder, varies linearly for serial processing and hyperbolically for parallel processing. Increased complexity related to parallel processing can be accepted in turbo decoders. 
The resulting latency (L) is equal to the code length (n) multiplied by the clock period (H): (L=n{*}H).}, isbn = {978-1-4799-3824-7}, author = {Boudaoud, Abdelghani and Abdelmounim, Elliassane and Barazzouk, Abdellfattah and Zbitou, Jamal and Belkasmi, Mostafa} } @conference {Slimani2014168, title = {Game theory to control logistic costs in a two-echelon supply chain}, booktitle = {Proceedings of 2nd IEEE International Conference on Logistics Operations Management, GOL 2014}, year = {2014}, note = {cited By 4}, pages = {168-170}, abstract = {As a mathematical tool of the decision maker, game theory is an essential methodology to analyze and solve situations where the decision of each rational agent, called player, affects the other agent{\textquoteright}s payoff. Indeed, in this work game theory is used to analyze an inventory and transportation optimization problem within a supply chain composed of two agents: a retailer who faces a random demand for a final product, and his supplier of raw materials, who is also responsible for the transportation function. Since the retailer is closer to the market and has a better view of demand forecasts, careful attention is given to information sharing and its impacts. {\textcopyright} 2014 IEEE.}, doi = {10.1109/GOL.2014.6887435}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908632329\&doi=10.1109\%2fGOL.2014.6887435\&partnerID=40\&md5=059b1ee51ffe9bf41468fce7e9cd9ed5}, author = {Slimani, I. and Achchab, S.} } @conference { ISI:000346582400026, title = {Game theory to control logistic costs in a two-echelon supply chain}, booktitle = {PROCEEDINGS OF 2014 2ND IEEE INTERNATIONAL CONFERENCE ON LOGISTICS AND OPERATIONS MANAGEMENT (GOL 2014)}, year = {2014}, note = {2nd IEEE International Conference on Logistics Operations Management (GOL), Rabat, MOROCCO, JUN 05-07, 2014}, pages = {168-171}, publisher = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, organization = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, abstract = {As a mathematical tool of the decision maker, game theory is an essential methodology to analyze and solve situations where the decision of each rational agent, called player, affects the other agent{\textquoteright}s payoff. Indeed, in this work game theory is used to analyze an inventory and transportation optimization problem within a supply chain composed of two agents: a retailer who faces a random demand for a final product, and his supplier of raw materials, who is also responsible for the transportation function. Since the retailer is closer to the market and has a better view of demand forecasts, careful attention is given to information sharing and its impacts.}, isbn = {978-1-4799-4650-1}, author = {Slimani, Ilham and Achchab, Said}, editor = {Benadada, Y} } @conference {Koudougnon2014118, title = {A Genetic Algorithms based detector for MIMO STBC systems}, booktitle = {WCCCS 2014 - Proceedings; 2014 5th Workshop on Codes, Cryptography and Communication Systems}, year = {2014}, note = {cited By 0}, pages = {118-123}, abstract = {This paper proposes a new STBC MIMO detector based on Genetic Algorithms (GA) that can approach the performance of an optimal detector based on the maximum likelihood detector (MLD). This detector is a hybridization of the GA and a linear detector such as the ZF or MMSE detector. The optimization of the parameters of the GA can significantly improve performance.
Our detector was compared with other equivalent detectors. The results show that our proposed algorithm gives better performance. {\textcopyright} 2014 IEEE.}, doi = {10.1109/WCCCS.2014.7107906}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84983119233\&doi=10.1109\%2fWCCCS.2014.7107906\&partnerID=40\&md5=a83799f8aa9373bdc7b5106c86a7ddc0}, author = {Koudougnon, H. and Azouaoui, A. and Belkasmi, M.} } @article {10001634720141101, title = {A Graph Based Approach to Trace Models Composition.}, journal = {Journal of Software (1796217X)}, volume = {9}, number = {11}, year = {2014}, pages = {2813 - 2822}, abstract = {A model driven engineering process involves different and heterogeneous models that represent various perspectives of the system under development. The model composition operation allows combining those sub-models into an integrated view, but remains a tedious activity. For that, traceability information must be maintained to comprehend the composition effects and better manage the operation itself. Against this context, the current paper describes a framework for model composition traceability. We consider the traces generation concern as a crosscutting concern where the weaving mechanism is performed using graph transformations. A composition specification case study is presented to illustrate our contribution. [ABSTRACT FROM AUTHOR]}, keywords = {aspect oriented modeling, Computer engineering, Computer simulation, Graph theory, graph transformation, Heterogeneous computing, Information theory, model composition, model transformation, traceability}, issn = {1796217X}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=100016347\&site=ehost-live}, author = {Laghouaouta, Youness and Anwar, Adil and Nassar, Mahmoud and Coulette, Bernard} } @conference { ISI:000371484600074, title = {Green opportunistic access for CRN with selfish users using Coalitional game Approach in partition form}, booktitle = {2014 THIRD IEEE INTERNATIONAL COLLOQUIUM IN INFORMATION SCIENCE AND TECHNOLOGY (CIST{\textquoteright}14)}, series = {Colloquium in Information Science and Technology}, year = {2014}, note = {3rd IEEE International Colloquium on Information Science and Technology (CIST), Tetouan, MOROCCO, OCT 20-22, 2014}, pages = {425-429}, publisher = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, organization = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, abstract = {We study energy conservation and system performance of decentralized cognitive radio network with cognitive users. Every cognitive radio (CR) user needs to sense the channel to send data, if the channel is unoccupied by a licensed user only one winner transmits its data. Hence, all other users waste their energy in sensing the licensed bands and in negotiating the resource access without managing to transmit data packets. We formulate the sensing-energy tradeoff problem for distributed cognitive radio network as a coalition formation game. Our contribution is to apply the coalitional game design to the most important phases of cognitive radio network (CRN): sensing and data transmission. The concept of coalitional game may enforce the cooperative behavior of CR users in order to improve the system performance (energy and delay..). 
The network is modeled using a coalitional game in partition form with non-transferable utility (NTU). After forming coalitions, no CR user in those coalitions aims to change its position. Indeed, the members of this coalition will cooperate in both the sensing and access phases to improve system performance. The selfish behavior becomes evident when the cognitive users are constrained to send their data within a short delay and with minimum energy consumption. Our proposed algorithm explains how the selfish attitude of the cognitive user leads it to join the coalition that increases its individual gain and, tacitly, to improve the performance of the entire system.}, isbn = {978-1-4799-5979-2}, issn = {2327-185X}, author = {Belghiti, Imane Daha and Elmachkour, Mouna and Berrada, Ismail and Kobbane, Abdellatif}, editor = {ElMohajir, M and AlAchhab, M and Chahhou, M} } @conference {Ghenname201439, title = {A hashtags dictionary from crowdsourced definitions}, booktitle = {Proceedings - International Conference on Data Engineering}, year = {2014}, note = {cited By 1}, pages = {39-44}, abstract = {Hashtags are user-defined terms used on the Web to tag messages like microposts, as featured on Twitter. Because a hashtag is a textual word, its representation does not convey all the concepts it embodies. Several online dictionaries have been manually and collaboratively built to provide natural language definitions of hashtags. Unfortunately, these dictionaries in their rough form are inefficient for their inclusion in automatic text processing systems. As hashtags can be polysemic, dictionaries are also agnostic to collisions of hashtags. This paper presents our approach for the automatic structuring of hashtag definitions into synonym rings. We present the output as a so-called folksionary, i.e. a single integrated dictionary built from everybody{\textquoteright}s definitions. For this purpose, we performed a semantic-relatedness clustering to group definitions that share the same meaning. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICDEW.2014.6818300}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84901760198\&doi=10.1109\%2fICDEW.2014.6818300\&partnerID=40\&md5=540602f5d144ab86a039b3b88df86c33}, author = {Ghenname, M.a and Subercaze, J.a and Gravier, C.a and Laforest, F.a and Abik, M.b and Ajhoun, R.b} } @conference { ISI:000366168000006, title = {A hashtags dictionary from crowdsourced definitions}, booktitle = {2014 IEEE 30TH INTERNATIONAL CONFERENCE ON DATA ENGINEERING WORKSHOPS (ICDEW)}, year = {2014}, note = {IEEE 30th International Conference on Data Engineering (ICDE), Chicago, IL, MAR 31-APR 04, 2014}, pages = {39-44}, publisher = {IEEE; Microsoft; Qatar Comp Res Inst; HERE Nokia; Purdue Univ, Cyber Ctr; NW Univ, McCormick Sch Engn; Google}, organization = {IEEE; Microsoft; Qatar Comp Res Inst; HERE Nokia; Purdue Univ, Cyber Ctr; NW Univ, McCormick Sch Engn; Google}, abstract = {Hashtags are user-defined terms used on the Web to tag messages like microposts, as featured on Twitter. Because a hashtag is a textual word, its representation does not convey all the concepts it embodies. Several online dictionaries have been manually and collaboratively built to provide natural language definitions of hashtags. Unfortunately, these dictionaries in their rough form are inefficient for their inclusion in automatic text processing systems. As hashtags can be polysemic, dictionaries are also agnostic to collisions of hashtags.
This paper presents our approach for the automatic structuring of hashtag definitions into synonym rings. We present the output as a so-called folksionary, i.e. a single integrated dictionary built from everybody{\textquoteright}s definitions. For this purpose, we performed a semantic-relatedness clustering to group definitions that share the same meaning.}, isbn = {978-1-4799-3481-2}, author = {Ghenname, Merieme and Subercaze, Julien and Gravier, Christophe and Laforest, Frederique and Abik, Mounia and Ajhoun, Rachida} } @conference {ElHamlaoui2014, title = {Heterogeneous models matching for consistency management}, booktitle = {Proceedings - International Conference on Research Challenges in Information Science}, year = {2014}, note = {cited By 2}, abstract = {This work is situated in the context of the application of Model Driven Engineering to the view-based modelling of complex systems. In fact, view-based models - also called partial models - are manipulated by different actors (designers), and are thus generally heterogeneous, that is, described with different DSLs (Domain Specific Languages). Instead of building a single global model, which is not realistic, we propose to organize the different partial models as a network of related models, which provides a global view of the system through a correspondence model. As the models are modelled separately by different designers, they also evolve separately, which induces a consistency problem. To solve it, we propose a semi-automatic process based on the correspondence model that detects changes, calculates their impacts, and proposes modifications to maintain consistency among the models. The approach is supported by a tool chain and illustrated by the example of a Bug Tracking System. {\textcopyright} 2014 IEEE.}, doi = {10.1109/RCIS.2014.6861074}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906751582\&doi=10.1109\%2fRCIS.2014.6861074\&partnerID=40\&md5=24df7be74c52394ea7231715c49b9680}, author = {El Hamlaoui, M.a b and Ebersold, S.a and Coulette, B.a and Nassar, M.b and Anwar, A.c} } @conference {Jorio2014861, title = {A hierarchical clustering algorithm based on spectral classification for Wireless Sensor Networks}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2014}, note = {cited By 1}, pages = {861-866}, abstract = {A Wireless Sensor Network (WSN) is composed of a large number of autonomous and compact devices called sensor nodes. This network can be an effective tool for gathering data in a variety of environments. However, these sensor nodes have some constraints due to their limited energy, storage capacity and computing power. Therefore, saving energy and thus extending the WSN lifetime entails great challenges. In order to prolong the lifetime of a WSN, this study presents a hierarchical clustering algorithm based on spectral classification (HCA-SC). First, to overcome the ideal distribution of clusters, HCA-SC partitions the network using a spectral classification algorithm. Second, for each cluster, HCA-SC selects a node as a cluster head with regard to residual energy and distance from the base station. Simulation results showed that our algorithm performs better in reducing the energy consumption of sensor nodes and effectively improves the lifetime of wireless sensor networks.
{\textcopyright} 2014 IEEE.}, doi = {10.1109/ICMCS.2014.6911354}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928732445\&doi=10.1109\%2fICMCS.2014.6911354\&partnerID=40\&md5=2070584e9f01d9b450a2a4a005ba6a08}, author = {Jorio, A.a and El Fkihi, S.b and Elbhiri, B.c and Aboutajdine, D.a} } @conference { ISI:000366999600151, title = {A Hierarchical Clustering Algorithm based on Spectral Classification for Wireless Sensor Networks}, booktitle = {2014 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2014}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Marrakech, MOROCCO, APR 14-16, 2014}, pages = {861-866}, abstract = {A Wireless Sensor Network (WSN) is composed of a large number of autonomous and compact devices called sensor nodes. This network can be an effective tool for gathering data in a variety of environments. However, these sensor nodes have some constraints due to their limited energy, storage capacity and computing power. Therefore, saving energy and thus extending the WSN lifetime entails great challenges. In order to prolong the lifetime of a WSN, this study presents a hierarchical clustering algorithm based on spectral classification (HCA-SC). First, to overcome the ideal distribution of clusters, HCA-SC partitions the network using a spectral classification algorithm. Second, for each cluster, HCA-SC selects a node as a cluster head with regard to residual energy and distance from the base station. Simulation results showed that our algorithm performs better in reducing the energy consumption of sensor nodes and effectively improves the lifetime of wireless sensor networks.}, isbn = {978-1-4799-3824-7}, author = {Jorio, Ali and El Fkihi, Sanaa and Elbhiri, Brahim and Aboutajdine, Driss} } @article {Elmaroud20141951, title = {On the impact of prototype filter length on the PAPR reduction of FBMC signals}, journal = {International Journal of Engineering and Technology}, volume = {6}, number = {4}, year = {2014}, note = {cited By 0}, pages = {1951-1960}, abstract = {One of the challenging issues for Filter Bank Multicarrier (FBMC) systems is their high Peak-to-Average Power Ratio (PAPR), which leads to the saturation of the high power amplifiers and consequently increases the out-of-band power. In this paper, we evaluate different FBMC PAPR reduction techniques and especially analyze the impact of the prototype filter length/overlapping factor on their performance.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908869430\&partnerID=40\&md5=653efcab8375d7b42c7c294a7bf8e2b5}, author = {Elmaroud, B.a and Faqihi, A.a b and Abbad, M.a and Aboutajdine, D.a} } @conference {Amazal2014247, title = {Improving fuzzy analogy based software development effort estimation}, booktitle = {Proceedings - Asia-Pacific Software Engineering Conference, APSEC}, volume = {1}, year = {2014}, note = {cited By 0}, pages = {247-254}, abstract = {Analogy-based estimation has recently emerged as a promising technique and a viable alternative to other conventional estimation methods. One of the most important research areas for analogy-based cost estimation is how to predict the effort of software projects when they are described by mixed numerical and categorical data. To address this issue, we have proposed, in an earlier work, a new approach called fuzzy analogy combining the key features of fuzzy logic and analogy-based reasoning.
However, fuzzy analogy may only be used when the possible values of the categorical attributes are derived from a numerical domain. The current study aims to extend our former approach to correctly handle categorical data. To this end, the fuzzy k-modes algorithm is used with two initialization techniques. The performance of the proposed approach was compared with that of classical analogy using the International Software Benchmarking Standards Group (ISBSG) dataset. The obtained results show significant improvement in estimation accuracy. {\textcopyright} 2014 IEEE.}, doi = {10.1109/APSEC.2014.46}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84951282652\&doi=10.1109\%2fAPSEC.2014.46\&partnerID=40\&md5=864ca37db60e0c0338a8f4ae557a43a4}, author = {Amazal, F.A.a and Idri, A.a and Abran, A.b} } @conference { ISI:000371484600008, title = {Integrating Linked Sensor Data for On-line Analytical Processing On-The-Fly}, booktitle = {2014 THIRD IEEE INTERNATIONAL COLLOQUIUM IN INFORMATION SCIENCE AND TECHNOLOGY (CIST{\textquoteright}14)}, series = {Colloquium in Information Science and Technology}, year = {2014}, note = {3rd IEEE International Colloquium on Information Science and Technology (CIST), Tetouan, MOROCCO, OCT 20-22, 2014}, pages = {43-47}, publisher = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, organization = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, abstract = {Sensor networks are gaining more and more attention in the current technology landscape. It is undeniable that their use allows a better monitoring of events that occur in the real world. Many sensors have been deployed for monitoring applications such as environmental monitoring, and traffic monitoring. A number of governments, corporates, and academic organizations or agencies hold independently sensor systems that generate a large amount of dynamic information from data sources with various formats of schemas and data. They are making this sensor data openly accessible by publishing it as Linked Sensor Data (LSD) on the Linked Open Data (LOD) cloud. LSD is the concept that defines the publication of public or private organization sensor data without restrictions. This is achieved by transforming raw sensor observations to RDF format and by linking it with other datasets on the LOD cloud. The seamless integration of LSD sources from multiple providers is a great challenge. In this paper, we investigate the possibility of integrating diverse LSD sources using the hybrid ontology approach for on-line analytical processing (OLAP) on-the-fly. 
With such an ontology-based integration framework, organizations or individuals will have greater opportunity to make their respective analysis based on a large amount of sensor data openly accessible on the Web.}, isbn = {978-1-4799-5979-2}, issn = {2327-185X}, author = {Guilavogui, Koly and Kjiri, Laila and Fredj, Mounia}, editor = {ElMohajir, M and AlAchhab, M and Chahhou, M} } @conference { ISI:000380387700064, title = {Intelligent System Based Support Vector Regression For Supply Chain Demand Forecasting}, booktitle = {2014 SECOND WORLD CONFERENCE ON COMPLEX SYSTEMS (WCCS)}, year = {2014}, note = {2014 Second World Conference on Complex Systems (WCCS), Agadir, MOROCCO, NOV 10-12, 2014}, pages = {79-83}, publisher = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, organization = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, abstract = {Supply chain management (SCM) is an emerging field that has commanded attention from different communities. On the one hand, the optimization of supply chain which is an important issue, requires a reliable prediction of future demand. On the other hand, It has been shown that intelligent systems and machine learning techniques are useful for forecasting in several applied domains. In this paper, we introduce the machine learning technique of time series forecasting Support Vector Regression (SVR) which is nowadays frequently used. Furthermore, we use the Particle Swarm Optimization (PSO) algorithm to optimize the SVR parameters. We investigate the accuracy of this approach for supply chain demand forecasting by applying it to a case study.}, isbn = {978-1-4799-4647-1}, author = {Sarhani, Malek and El Afia, Abdellatif} } @conference {Sarhani201479, title = {Intelligent system based support vector regression for supply chain demand forecasting}, booktitle = {2014 2nd World Conference on Complex Systems, WCCS 2014}, year = {2014}, note = {cited By 0}, pages = {79-83}, abstract = {Supply chain management (SCM) is an emerging field that has commanded attention from different communities. On the one hand, the optimization of supply chain which is an important issue, requires a reliable prediction of future demand. On the other hand, It has been shown that intelligent systems and machine learning techniques are useful for forecasting in several applied domains. In this paper, we introduce the machine learning technique of time series forecasting Support Vector Regression (SVR) which is nowadays frequently used. Furthermore, we use the Particle Swarm Optimization (PSO) algorithm to optimize the SVR parameters. We investigate the accuracy of this approach for supply chain demand forecasting by applying it to a case study. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICoCS.2014.7060941}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929162457\&doi=10.1109\%2fICoCS.2014.7060941\&partnerID=40\&md5=8f615fc2cd27fff20b0220a543294e57}, author = {Sarhani, M. and El Afia, A.} } @conference {Attaoui2014233, title = {Malicious workers tolerance in an agent-based grid resource brokering system - Preliminary considerations}, booktitle = {Proceedings - 2014 International Conference on Computational Science and Computational Intelligence, CSCI 2014}, volume = {2}, year = {2014}, note = {cited By 0}, pages = {233-236}, abstract = {The Agent in Grid (AiG) project develops an agent-based infrastructure for resource management in the Grid. 
The basic assumptions of the project are that (1) agents work in teams, and (2) information is ontologically represented and semantically processed. Thus far, issues involved in trust management were considered only from the point of view of fulfillment of contracts between stakeholders. However, it is also possible that some workers that fulfill their contracts, return incorrect results. In this note, we consider how the trust management in the AiG project can be further conceptualized by using a reputation-based voting. {\textcopyright} 2014 IEEE.}, doi = {10.1109/CSCI.2014.126}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84902688143\&doi=10.1109\%2fCSCI.2014.126\&partnerID=40\&md5=e68a3d1c454b5d1f8c9cfca6c49faa4a}, author = {Attaoui, N.a and Essaaidi, M.b and Paprzycki, M.c and Ganzha, M.c} } @conference { ISI:000355910900043, title = {Malicious Workers Tolerance in an Agent-Based Grid Resource Brokering System - Preliminary Considerations (Short Paper)}, booktitle = {2014 INTERNATIONAL CONFERENCE ON COMPUTATIONAL SCIENCE AND COMPUTATIONAL INTELLIGENCE (CSCI), VOL 2}, year = {2014}, note = {International Conference on Computational Science and Computational Intelligence (CSCI), Las Vegas, NV, MAR 10-13, 2014}, pages = {233-236}, publisher = {American Council Sci \& Educ}, organization = {American Council Sci \& Educ}, abstract = {The Agent in Grid (AiG) project develops an agent-based infrastructure for resource management in the Grid. The basic assumptions of the project are that (1) agents work in teams, and (2) information is ontologically represented and semantically processed. Thus far, issues involved in trust management were considered only from the point of view of fulfillment of contracts between stakeholders. However, it is also possible that some workers that fulfill their contracts, return incorrect results. In this note, we consider how the trust management in the AiG project can be further conceptualized by using a reputation-based voting.}, isbn = {978-1-4799-3009-8}, doi = {10.1109/CSCI.2014.126}, author = {Attaoui, Naoual and Essaaidi, Mohammad and Paprzycki (Ca), Marcin and Ganzha, Maria}, editor = {Akhgar, B and Arabnia, HR} } @article {11154994420140101, title = {MALICIOUS WORKERS TOLERANCE IN AN AGENT-BASED GRID RESOURCE BROKERING SYSTEM.}, journal = {IADIS International Journal on Computer Science \& Information Systems}, volume = {9}, number = {1}, year = {2014}, pages = {55 - 70}, abstract = {The Agent in Grid (AiG) project aims to develop an agent-based infrastructure that facilitates the resource management in the Grid. The core assumptions behind the system are: software agents representing Grid resources work in teams, and all information is ontologically represented and semantically processed. Thus far, trust management in AiG system is approached from the perspective of breaching an agreement between Users and teams or contracts between teams and Workers. Unfortunately, it is also possible that some Workers, which fulfilled the agreement, could have maliciously returned incorrect results from the jobs that they executed. In this paper, we discuss how the trust management in the AiG project can be improved by using a reputation-based voting technique, in order to assure reliable job execution. 
}, keywords = {agent system, Computational grids (Computer systems), Distributed computing, Grid computing, Intelligent agents (Computer software), Job performance, majority voting, malicious workers tolerance, reputation, Teams in the workplace, trust management}, issn = {16463692}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=111549944\&site=ehost-live}, author = {Attaoui, Naoual and Bendahmane, Ahmed and Essaaidi, Mohammad and Sanyal, Sugata and Ganzha, Maria and Paprzycki, Marcin and Wasielewska, Katarzyna} } @conference { ISI:000346582400008, title = {A metaheuristic approach for solving the airline maintenance routing with aircraft on ground problem}, booktitle = {PROCEEDINGS OF 2014 2ND IEEE INTERNATIONAL CONFERENCE ON LOGISTICS AND OPERATIONS MANAGEMENT (GOL 2014)}, year = {2014}, note = {2nd IEEE International Conference on Logistics Operations Management (GOL), Rabat, MOROCCO, JUN 05-07, 2014}, pages = {48+}, publisher = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, organization = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, abstract = {In the airline industry, the Aircraft Maintenance Routing (AMR) problem has been one of the great successes of operations research. The AMR problem is to determine a particular route for each aircraft to undergo different levels of maintenance checks. The objective is to minimize the total maintenance costs. In this study, our aim is to present a mathematical formulation for the AMR problem which takes into account the case of Aircraft On Ground (AOG). We develop solution approaches based on Particle Swarm Optimization algorithm and Genetic algorithm for solving the problem. The results show the effectiveness of this solution in reducing computational time.}, isbn = {978-1-4799-4650-1}, author = {Ezzinbi, Omar and Sarhani, Malek and El Afia, Abdellatif and Benadada, Youssef}, editor = {Benadada, Y} } @conference {Ezzinbi201448, title = {A metaheuristic approach for solving the airline maintenance routing with aircraft on ground problem}, booktitle = {Proceedings of 2nd IEEE International Conference on Logistics Operations Management, GOL 2014}, year = {2014}, note = {cited By 1}, pages = {48-52}, abstract = {In the airline industry, the Aircraft Maintenance Routing (AMR) problem has been one of the great successes of operations research. The AMR problem is to determine a particular route for each aircraft to undergo different levels of maintenance checks. The objective is to minimize the total maintenance costs. In this study, our aim is to present a mathematical formulation for the AMR problem which takes into account the case of Aircraft On Ground (AOG). We develop solution approaches based on Particle Swarm Optimization algorithm and Genetic algorithm for solving the problem. The results show the effectiveness of this solution in reducing computational time. {\textcopyright} 2014 IEEE.}, doi = {10.1109/GOL.2014.6887446}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908628134\&doi=10.1109\%2fGOL.2014.6887446\&partnerID=40\&md5=0d3ce9e2a4abfc7cd5949d8a813591cc}, author = {Ezzinbi, O. and Sarhani, M. and El Afia, A.
and Benadada, Y.} } @conference { ISI:000371484600037, title = {MINING USER PATTERNS FOR LOCATION PREDICTION IN MOBILE SOCIAL NETWORKS}, booktitle = {2014 THIRD IEEE INTERNATIONAL COLLOQUIUM IN INFORMATION SCIENCE AND TECHNOLOGY (CIST{\textquoteright}14)}, series = {Colloquium in Information Science and Technology}, year = {2014}, note = {3rd IEEE International Colloquium on Information Science and Technology (CIST), Tetouan, MOROCCO, OCT 20-22, 2014}, pages = {213-218}, publisher = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, organization = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, abstract = {Understanding human mobility dynamics is of essential importance to today{\textquoteright}s mobile applications, including context-aware advertising and city-wide sensing applications. Recently, location-based social networks (LBSNs) have attracted significant research effort to investigate the spatial, temporal and social aspects of user patterns. LBSNs allow users to {\textquoteleft}{\textquoteleft}check-in{{\textquoteright}{\textquoteright}} at geographical locations and share this information with friends. In this paper, analysis of check-in data provided by Foursquare, the online location-based social network, allows us to construct a set of features that capture spatial, temporal and similarity characteristics of user mobility. We apply this knowledge to the location prediction problem, and combine these features in supervised learning for future location prediction. We find that the supervised classifier based on the combination of multiple features offers reasonable accuracy.}, isbn = {978-1-4799-5979-2}, issn = {2327-185X}, author = {Mourchid, Fatima and Habbani, Ahmed and El koutbi, Mohamed}, editor = {ElMohajir, M and AlAchhab, M and Chahhou, M} } @article {Adadi20141667, title = {Multi-agent architecture for business modeling of web services composition based on WS2JADE framework}, journal = {International Review on Computers and Software}, volume = {9}, number = {10}, year = {2014}, note = {cited By 1}, pages = {1667-1674}, abstract = {Web services have gained popularity today for enabling universal interoperability among applications. To answer the complex service requirements of the user, composite web services have to be constructed correctly and effectively. For this reason, various approaches have been used for web service composition. Among these approaches, we cite the one that allows software agents to access and control Web services; in this approach, the integration between agent and web service platforms is important. For this purpose, the toolkit WS2JADE was developed at the Centre of Intelligent Agent and Multi-Agent Systems; it allows the deployment of Web services as JADE (JAVA Agent DEvelopment Framework) agent services at run time. Therefore, the composition of web services reduces to that of JADE agent services. In this paper we propose a new approach for modeling the composition of JADE agent services, and thus of web services, using the Multi-Agent Reactive Decisional System (MARDS) Model. {\textcopyright} 2014 Praise Worthy Prize S.r.l.
- All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84918797197\&partnerID=40\&md5=e62783fd0572039e37459b2f34c82d37}, author = {Adadi, N.a and Berrada, M.a and Chenouni, D.a and Bounabat, B.b} } @conference {Rhanoui2014, title = {A multilevel contract model for dependable Feature-Oriented components}, booktitle = {2014 9th International Conference on Intelligent Systems: Theories and Applications, SITA 2014}, year = {2014}, note = {cited By 0}, abstract = {Feature-Oriented System Development is a promising and recent approach for developing domain-specific component-based systems. To be adopted in a safety-critical environment it must handle dependability requirements and offer mechanisms to ensure the reliability level of the components and the system. For this purpose, the contract-based approach is a lightweight formal method for designing and specifying systems{\textquoteright} requirements, it can be introduced in an early stage during the design phase. In this paper, we present a multilevel contract model and a domain-specific modeling language that aims to address reliability and quality issues for component-based systems by expressing and specifying a set of its properties and constraints. {\textcopyright} 2014 IEEE.}, doi = {10.1109/SITA.2014.6847278}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84904596650\&doi=10.1109\%2fSITA.2014.6847278\&partnerID=40\&md5=6f574de296778d6b1ed4ff898fc66eb1}, author = {Rhanoui, M. and El Asri, B.} } @conference { ISI:000346142800003, title = {A Multilevel Contract Model for Dependable Feature-Oriented Components}, booktitle = {2014 9TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA{\textquoteright}14)}, year = {2014}, note = {9th International Conference on Intelligent Systems - Theories and Applications (SITA), Inst Natl Postes \& Telecommunicat, Rabat, MOROCCO, MAY 07-08, 2014}, publisher = {IEEE; IEEE Morocco}, organization = {IEEE; IEEE Morocco}, abstract = {Feature-Oriented System Development is a promising and recent approach for developing domain-specific component-based systems. To be adopted in a safety-critical environment it must handle dependability requirements and offer mechanisms to ensure the reliability level of the components and the system. For this purpose, the contract-based approach is a lightweight formal method for designing and specifying systems{\textquoteright} requirements, it can be introduced in an early stage during the design phase. 
In this paper, we present a multilevel contract model and a domain-specific modeling language that aims to address reliability and quality issues for component-based systems by expressing and specifying a set of its properties and constraints.}, isbn = {978-1-4799-3566-6}, author = {Rhanoui, Maryem and El Asri, Bouchra} } @conference { ISI:000371484600066, title = {Multiple Active Cores-based Shared Multicast Tree for mobile IPv6 environment}, booktitle = {2014 THIRD IEEE INTERNATIONAL COLLOQUIUM IN INFORMATION SCIENCE AND TECHNOLOGY (CIST{\textquoteright}14)}, series = {Colloquium in Information Science and Technology}, year = {2014}, note = {3rd IEEE International Colloquium on Information Science and Technology (CIST), Tetouan, MOROCCO, OCT 20-22, 2014}, pages = {378-383}, publisher = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, organization = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, abstract = {Due to the progress of network multimedia technology, internet research community proposed many different multicast routing protocols to support efficient real-time multimedia application such as, IPTV, videoconferencing, group games. These applications require a multicast routing protocol in which packets arrive to multicast receivers with minimum delay and delay variation. These applications are more important with arrival of mobile IPv6 protocol with mobile receivers and sources with continuous access. Nevertheless, the design of multicast protocols does not take into account that group members may be mobile. Dynamic group members and sources can rapidly affect quality of both routing protocol scheme and multicast tree used. The key idea of this work is to make the handover of multicast members transparent and a quick recovery mechanism to maintain an optimal multicast tree, by using MACT-MIPv6 architecture based on multicast routing protocol with Shared Multiple Active Cores Multicast Tree to hide the mobility of mobile multicast members from the main multicast delivery tree. Simulation results show that good performance is achieved in terms of handoff latency, end-to-end delay, tree construction delay and others metrics.}, isbn = {978-1-4799-5979-2}, issn = {2327-185X}, author = {Baddi, Youssef and El Kettani, Mohamed Dafir Ech-Cherif}, editor = {ElMohajir, M and AlAchhab, M and Chahhou, M} } @conference { ISI:000363271300045, title = {New Iterative Decoding Algorithms of 2D Product Block Codes}, booktitle = {2014 WORLD CONGRESS ON COMPUTER APPLICATIONS AND INFORMATION SYSTEMS (WCCAIS)}, year = {2014}, note = {World Congress on Computer Applications and Information Systems (WCCAIS), Hammamet, TUNISIA, JAN 17-19, 2014}, abstract = {This paper presents four new iterative decoders for two dimensional product block codes (2D-PBC) based on Genetic Algorithms. Each of these iterative decoders runs in parallel on a number of processors connected by a network. 
They have almost the same complexity as the conventional iterative decoder, but their performances are improved since, at each iteration, they keep the best of the extrinsic information computed by the elementary decoders running simultaneously on all processors.}, isbn = {978-1-4799-3351-8}, author = {Ahmadi, Abdeslam and El Bouanani, Faissal and Ben-Azza, Hussain} } @conference {Ahmadi2014, title = {New iterative decoding algorithms of 2D product block codes}, booktitle = {2014 World Congress on Computer Applications and Information Systems, WCCAIS 2014}, year = {2014}, note = {cited By 0}, abstract = {This paper presents four new iterative decoders for two dimensional product block codes (2D-PBC) based on Genetic Algorithms. Each of these iterative decoders runs in parallel on a number of processors connected by a network. They have almost the same complexity as the conventional iterative decoder, but their performances are improved since, at each iteration, they keep the best of the extrinsic information computed by the elementary decoders running simultaneously on all processors. {\textcopyright} 2014 IEEE.}, doi = {10.1109/WCCAIS.2014.6916585}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908681895\&doi=10.1109\%2fWCCAIS.2014.6916585\&partnerID=40\&md5=99d8dab9c9071120a501d3ee0124facf}, author = {Ahmadi, A.a and Ben-Azza, H.a and El Bouanani, F.b} } @conference {Boukhriss2014124, title = {New technique of localization a targeted Virtual Machine in a Cloud platform}, booktitle = {WCCCS 2014 - Proceedings; 2014 5th Workshop on Codes, Cryptography and Communication Systems}, year = {2014}, note = {cited By 0}, pages = {124-127}, abstract = {Cloud computing gives another meaning to the word {\textquoteleft}sharing{\textquoteright} in the world of networks. However, it gives rise to serious security problems. One of the techniques used by the attackers is {\textquoteleft}Cloud cartography{\textquoteright}, which aims to locate a Virtual Machine in the cloud and launch a side channel attack. In this paper we propose a new technique for locating a virtual machine in a cloud environment. For this purpose, we will first trace the gateway of the cloud and then locate the target amongst the thousands of machines hosted in the cloud. Our attack is based on a new command TRACECL that can reach up to 100 routers, and we propose a method to locate the target Virtual Machine in a Cloud environment. {\textcopyright} 2014 IEEE.}, doi = {10.1109/WCCCS.2014.7107907}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84949928318\&doi=10.1109\%2fWCCCS.2014.7107907\&partnerID=40\&md5=f2f286ec3f7af4547eec12ede767ec25}, author = {Boukhriss, H.a and Azougaghe, A.b and Hedabou, M.a} } @conference {Afyf20141475, title = {A novel miniaturized UWB antenna for microwave imaging}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2014}, note = {cited By 3}, pages = {1475-1478}, abstract = {A compact UWB planar antenna fed with a microstrip-line is proposed. The new design is composed of a square patch with a small size of 26 mm {\texttimes} 20 mm, with three rectangular slots, fed by a 50 Omega microstrip transmission line, and a reduced ground plane which has a T-shaped slot. It is intended to be used in Microwave Imaging applications that cover the ultra-wideband (UWB) frequency band. A wide bandwidth of around 3.98-17.26 GHz with a stable radiation pattern is achieved.
The proposed antenna has excellent characteristics, a low profile, and is cost-effective compared to existing UWB antennas. The UWB antenna is designed and analyzed using CST Microwave Studio and HFSS software in transient mode to verify the antenna parameter improvements. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICMCS.2014.6911396}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928738029\&doi=10.1109\%2fICMCS.2014.6911396\&partnerID=40\&md5=86bd1b82f5e31de785dab59387bf649f}, author = {Afyf, A. and Bellarbi, L.} } @conference { ISI:000366999600267, title = {A Novel Miniaturized UWB Antenna for Microwave Imaging}, booktitle = {2014 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2014}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Marrakech, MOROCCO, APR 14-16, 2014}, pages = {1475-1478}, abstract = {A compact UWB planar antenna fed with a microstrip-line is proposed. The new design is composed of a square patch with a small size of 26 mm x 20 mm, with three rectangular slots, fed by a 50 Omega microstrip transmission line, and a reduced ground plane which has a T-shaped slot. It is intended to be used in Microwave Imaging applications that cover the ultra-wideband (UWB) frequency band. A wide bandwidth of around 3.98-17.26 GHz with a stable radiation pattern is achieved. The proposed antenna has excellent characteristics, a low profile, and is cost-effective compared to existing UWB antennas. The UWB antenna is designed and analyzed using CST Microwave Studio and HFSS software in transient mode to verify the antenna parameter improvements.}, isbn = {978-1-4799-3824-7}, author = {Afyf, Amal and Bellarbi, Larbi} } @conference { ISI:000345620500014, title = {PAPR Reduction of FBMC Signals by Combining Exponential Companding and Hadamard Transforms}, booktitle = {2014 INTERNATIONAL SYMPOSIUM ON NETWORKS, COMPUTERS AND COMMUNICATIONS}, year = {2014}, note = {International Symposium on Networks, Computers and Communications, Hammamet, TUNISIA, JUN 17-19, 2014}, abstract = {Filter Bank Multicarrier (FBMC) is an efficient multicarrier modulation characterized by its good spectral efficiency and low out-of-band leakage. Nevertheless, FBMC signals suffer from a high Peak-to-Average Power Ratio (PAPR). This leads nonlinear devices such as Power Amplifiers, when used, to operate in their saturation region, which may severely impair system performance. In this paper, we propose a PAPR reduction technique based on the combination of the Hadamard transform with the exponential companding transform. Simulation results show that the proposed scheme can offer better PAPR reduction than the exponential companding and Hadamard transforms when used separately.}, isbn = {978-1-4799-5874-0}, author = {Elmaroud, Brahim and Faqihi, Ahmed and Abbad, Mohammed and Aboutajdine, Driss} } @conference {Elmaroud2014, title = {PAPR reduction of FBMC signals by combining exponential companding and Hadamard transforms}, booktitle = {2014 International Symposium on Networks, Computers and Communications, ISNCC 2014}, year = {2014}, note = {cited By 0}, abstract = {Filter Bank Multicarrier (FBMC) is an efficient multicarrier modulation characterized by its good spectral efficiency and low out-of-band leakage. Nevertheless, FBMC signals suffer from a high Peak-to-Average Power Ratio (PAPR). This leads nonlinear devices such as Power Amplifiers, when used, to operate in their saturation region, which may severely impair system performance.
In this paper, we propose a PAPR reduction technique based on the combination of the Hadamard transform with the exponential companding transform. Simulation results show that the proposed scheme can offer better PAPR reduction than the exponential companding and Hadamard transforms when used separately. {\textcopyright} 2014 IEEE.}, doi = {10.1109/SNCC.2014.6866517}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84907345923\&doi=10.1109\%2fSNCC.2014.6866517\&partnerID=40\&md5=982b03a2bd8c550f85b39b1d9c441790}, author = {Elmaroud, B.a and Faqihi, A.b and Abbad, M.a and Aboutajdine, D.a} } @conference { ISI:000346582400009, title = {Particle swarm optimization algorithm for solving airline crew scheduling problem}, booktitle = {PROCEEDINGS OF 2014 2ND IEEE INTERNATIONAL CONFERENCE ON LOGISTICS AND OPERATIONS MANAGEMENT (GOL 2014)}, year = {2014}, note = {2nd IEEE International Conference on Logistics Operations Management (GOL), Rabat, MOROCCO, JUN 05-07, 2014}, pages = {52-56}, publisher = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, organization = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, abstract = {In air transport, the cost related to crew members represents one of the most important costs borne by airline companies. The objective of the crew scheduling problem is to determine a minimum-cost set of pairings so that every flight leg is assigned a qualified crew and every pairing satisfies the set of applicable work rules. In this paper, we propose a solution to the crew scheduling problem with the Particle Swarm Optimization (PSO) algorithm; this solution approach is compared with the Genetic Algorithm (GA) for both the crew pairing and crew assignment problems, which are the two parts of the crew scheduling problem.}, isbn = {978-1-4799-4650-1}, author = {Ezzinbi, Omar and Sarhani, Malek and El Afia, Abdellatif and Benadada, Youssef}, editor = {Benadada, Y} } @conference {Ezzinbi201452, title = {Particle swarm optimization algorithm for solving airline crew scheduling problem}, booktitle = {Proceedings of 2nd IEEE International Conference on Logistics Operations Management, GOL 2014}, year = {2014}, note = {cited By 1}, pages = {52-56}, abstract = {In air transport, the cost related to crew members represents one of the most important costs borne by airline companies. The objective of the crew scheduling problem is to determine a minimum-cost set of pairings so that every flight leg is assigned a qualified crew and every pairing satisfies the set of applicable work rules. In this paper, we propose a solution to the crew scheduling problem with the Particle Swarm Optimization (PSO) algorithm; this solution approach is compared with the Genetic Algorithm (GA) for both the crew pairing and crew assignment problems, which are the two parts of the crew scheduling problem. {\textcopyright} 2014 IEEE.}, doi = {10.1109/GOL.2014.6887447}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908628135\&doi=10.1109\%2fGOL.2014.6887447\&partnerID=40\&md5=61f25771c33cea8f4845104ba352a303}, author = {Ezzinbi, O. and Sarhani, M. and El Afia, A.
and Benadada, Y.} } @conference {Belkasmi2014, title = {Preface}, booktitle = {WCCCS 2014 - Proceedings; 2014 5th Workshop on Codes, Cryptography and Communication Systems}, year = {2014}, note = {cited By 0}, doi = {10.1109/WCCCS.2014.7107899}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84983197048\&doi=10.1109\%2fWCCCS.2014.7107899\&partnerID=40\&md5=9d49b4a3c5ec947f575872ad21efecf5}, author = {Belkasmi, M.a and Aaroud, A.b} } @article { ISI:000330395300001, title = {Proactive Schema Based Link Lifetime Estimation and Connectivity Ratio}, journal = {SCIENTIFIC WORLD JOURNAL}, year = {2014}, abstract = {The radio link between a pair of wireless nodes is affected by a set of random factors such as transmission range, node mobility, and environment conditions. The properties of such radio links are continually experienced when nodes status balances between being reachable and being unreachable; thereby on completion of each experience the statistical distribution of link lifetime is updated. This aspect is emphasized in mobile ad hoc network especially when it is deployed in some fields that require intelligent processing of data information such as aerospace domain.}, issn = {1537-744X}, doi = {10.1155/2014/172014}, author = {Bachir, Bouamoud and Ali, Ouacha and Ahmed, Habbani and Mohamed, Elkoutbi} } @article {Lakki20141, title = {QoS and energy with clustering in MANETs}, journal = {Far East Journal of Electronics and Communications}, volume = {13}, number = {1}, year = {2014}, note = {cited By 1}, pages = {1-16}, abstract = {In the prior works, there are several works that address the problem of clustering in MANETs (ad hoc network). To form clusters and select cluster headers, each solution has a different criterion. In our case, it is based on the density of HELLO messages. In other words, we propose a clustering mechanism represented by a density parameter HELLO message for each cluster. This new metric we will consider in the selection process of multipoint relays to improve quality of service in mobile ad hoc networks, it gives a new version of the protocol OLSR named clustering OLSR. The NS2 simulation shows that this new version of OLSR protocol is better than the standard version. Performance indicators of our version of OLSR protocol improve the standard version. {\textcopyright} 2014 Pushpa Publishing House.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84907849019\&partnerID=40\&md5=898c8acc3b1d84502c48ccb07f7a78ab}, author = {Lakki, N.a and Habbani, A.a b and El Abbadi, J.a} } @conference {Aoun2014762, title = {A robust crew pairing based on multi-agent Markov decision processes}, booktitle = {2014 2nd World Conference on Complex Systems, WCCS 2014}, year = {2014}, note = {cited By 0}, pages = {762-768}, abstract = {Airline scheduling is a real challenge in the context of the airline industry; this includes a lot of planning and operational decision problems and deals with a large number of interdependent resources. A prominent problem in airline scheduling is crew scheduling, specially pairings or Tour-of-Duty planning problem. The objective is to ensure optimal allocation of crews to flights by specifying the set of pairings that minimize the planned cost. The widely used algorithms assume no disruptions. However, airline operations often undergo stochastic disturbances that have to be taken into account in order to minimize the real operating cost. 
Recently, great interest has been given to robust crew scheduling with consideration of the stochastic nature of disturbances like technical breakdowns or bad weather conditions. In this paper, we develop a stochastic model of crew pairing problem based on Multi-agent Markov Decision Processes (MMDP); thus, the problem will be treated as finding the optimal policy to adopt in stochastic cases of disturbances. Also, a computational study is conducted to ensure validity of our proposed model. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICoCS.2014.7060940}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929193249\&doi=10.1109\%2fICoCS.2014.7060940\&partnerID=40\&md5=24e45c65c00ba3e4dc4825bd5909386b}, author = {Aoun, O. and El Afia, A.} } @conference { ISI:000380387700063, title = {A Robust Crew Pairing based on Multi-Agent Markov Decision Processes}, booktitle = {2014 SECOND WORLD CONFERENCE ON COMPLEX SYSTEMS (WCCS)}, year = {2014}, note = {2014 Second World Conference on Complex Systems (WCCS), Agadir, MOROCCO, NOV 10-12, 2014}, pages = {762-768}, publisher = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, organization = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, abstract = {Airline scheduling is a real challenge in the context of the airline industry; this includes a lot of planning and operational decision problems and deals with a large number of interdependent resources. A prominent problem in airline scheduling is crew scheduling, specially pairings or Tour-of-Duty planning problem. The objective is to ensure optimal allocation of crews to flights by specifying the set of pairings that minimize the planned cost. The widely used algorithms assume no disruptions. However, airline operations often undergo stochastic disturbances that have to be taken into account in order to minimize the real operating cost. Recently, great interest has been given to robust crew scheduling with consideration of the stochastic nature of disturbances like technical breakdowns or bad weather conditions. In this paper, we develop a stochastic model of crew pairing problem based on Multi-agent Markov Decision Processes (MMDP); thus, the problem will be treated as finding the optimal policy to adopt in stochastic cases of disturbances. Also, a computational study is conducted to ensure validity of our proposed model.}, isbn = {978-1-4799-4647-1}, author = {Aoun, Oussama and El Afia, Abdellatif} } @conference {Bassiti20141173, title = {Semantic-based framework for innovation management}, booktitle = {Proceedings of the European Conference on Knowledge Management, ECKM}, volume = {3}, year = {2014}, note = {cited By 0}, pages = {1173-1182}, abstract = {In a turbulent and more competition-oriented environment, organizations need to develop their ability to adapt quickly. To be able to adapt, an organization must, of course, be innovative and attractive, and successful innovation should be carefully planned and efficiently managed. Even if some researchers treat the early stages of innovation as "fuzzy" because of their uncertainty, we suppose, as many others that a well structured processes, a clear representation and a good designed management framework are crucial for the ultimate success of any innovation. Understanding the process of innovation is to understand the factors that facilitate and inhibit the success of innovations. 
A best understanding of how these factors are related will lead to define a proper representation and an efficient management framework supporting the innovation full lifecycle. In addition, organizations have become more aware of external knowledge and technology to maintain their competitiveness in the global market and they feel a growing need to open up their innovation processes. Therefore, to achieve successful innovation in an open context, it is necessary to overcome the challenge of integrating distributed and heterogeneous knowledge management systems. The emerging and rapid development of semantic technologies brings new opportunities and provides a promising direction for such challenge. Semantic technologies focus on the formalization of knowledge to create schemes that allow knowledge to be effectively stored, modified, shared, exchanged and reasoned. In this paper, a semantically-powered framework for innovation management is presented. This framework includes a new Lifecycle Model supporting all activities of innovation with a special focus on its front end stages; a Semantic Representation of innovation, based on three factors we have identified as cornerstones of any successful innovation and that supports a set of automatic and intelligent services such as annotation, relationship retrieval and semantic search; used to power the Core Services of our framework that aim to promote innovation, notably within an open context.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962909325\&partnerID=40\&md5=8b920d5c601c78dbb64678ed6a21c234}, author = {Bassiti, L.E. and Ajhoun, R.} } @conference { ISI:000351503500139, title = {Semantic-Based Framework for Innovation Management}, booktitle = {PROCEEDINGS OF THE 15TH EUROPEAN CONFERENCE ON KNOWLEDGE MANAGEMENT (ECKM 2014), VOLS 1-3}, series = {Proceedings of the European Conference on Knowledge Management}, year = {2014}, note = {15th European Conference on Knowledge Management (ECKM), Polytechn Inst Santarem, Santarem Sch Management \& Technol, Santarem, PORTUGAL, SEP 04-05, 2014}, pages = {1173-1182}, abstract = {In a turbulent and more competition- oriented environment, organizations need to develop their ability to adapt quickly. To be able to adapt, an organization must, of course, be innovative and attractive, and successful innovation should be carefully planned and efficiently managed. Even if some researchers treat the early stages of innovation as {\textquoteleft}{\textquoteleft}fuzzy{{\textquoteright}{\textquoteright}} because of their uncertainty, we suppose, as many others that a well structured processes, a clear representation and a good designed management framework are crucial for the ultimate success of any innovation. Understanding the process of innovation is to understand the factors that facilitate and inhibit the success of innovations. A best understanding of how these factors are related will lead to define a proper representation and an efficient management framework supporting the innovation full lifecycle. In addition, organizations have become more aware of external knowledge and technology to maintain their competitiveness in the global market and they feel a growing need to open up their innovation processes. Therefore, to achieve successful innovation in an open context, it is necessary to overcome the challenge of integrating distributed and heterogeneous knowledge management systems. 
The emerging and rapid development of semantic technologies brings new opportunities and provides a promising direction for such challenge. Semantic technologies focus on the formalization of knowledge to create schemes that allow knowledge to be effectively stored, modified, shared, exchanged and reasoned. In this paper, a semantically-powered framework for innovation management is presented. This framework includes a new Lifecycle Model supporting all activities of innovation with a special focus on its front end stages; a Semantic Representation of innovation, based on three factors we have identified as cornerstones of any successful innovation and that supports a set of automatic and intelligent services such as annotation, relationship retrieval and semantic search; used to power the Core Services of our framework that aim to promote innovation, notably within an open context.}, isbn = {978-1-910309-35-3}, issn = {2048-8963}, author = {El Bassiti, Lamyaa and Ajhoun, Rachida}, editor = {Vivas, C and Sequeira, P} } @article {Assad2014324, title = {Short: Intrusion detection quality analysis for homogeneous wireless sensor networks}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {8593 LNCS}, year = {2014}, note = {cited By 0}, pages = {324-329}, abstract = {In this paper we analyze the intrusion detection in a homogeneous Wireless Sensor Network that is defined as a mechanism to monitor and detect unauthorized intrusions or anomalous moving attackers in area of interest. The quality of deterministic deployment can be determined sufficiently by analysis, before the deployment. However, when random deployment is required, determining the deployment quality becomes challenging and depends directly on node density. The major question is centered on the network coverage problem, how can we guarantee that each point of the region is covered by the required number of sensors? To deal with this, probabilistic intrusion detection models are adopted, called single and multi sensing probability detection and the deployment quality issue is surveyed and analyzed in terms of coverage. We evaluate our probabilistic model in homogeneous wireless sensor network, in term of sensing range, node density, and intrusion distance. {\textcopyright} 2014 Springer International Publishing.}, doi = {10.1007/978-3-319-09581-3_24}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84905899551\&doi=10.1007\%2f978-3-319-09581-3_24\&partnerID=40\&md5=e868b83abeca52460109a066d8ddde51}, author = {Assad, N.a and Elbhiri, B.b and El Fkihi, S.c and Faqihi, M.A.c and Ouadou, M.a and Aboutajdine, D.a} } @article {Faizi201464, title = {Social media: An optimal virtual environment for learning foreign languages}, journal = {International Journal of Emerging Technologies in Learning}, volume = {9}, number = {5}, year = {2014}, note = {cited By 3}, pages = {64-66}, abstract = {The present paper aims at exploring the potential role that social media technologies play in learning foreign languages. For this purpose, a survey was carried out to examine students{\textquoteright} and language learners{\textquoteright} perceptions and attitudes about using these platforms. Results of the research study revealed that the great majority of the respondents actually use these web-based applications to enhance their language skills. 
Most importantly, they noted that social media contribute in improving their listening, reading, speaking and writing skills. Accordingly, we strongly recommend that instructors use these online tools in distant, blended, or face-to-face language learning settings.}, doi = {10.3991/ijet.v9i5.3911}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84907689668\&doi=10.3991\%2fijet.v9i5.3911\&partnerID=40\&md5=22bc806cdbaebe7f28f0b9664aa83c8f}, author = {Faizi, R. and El Afia, A. and Chiheb, R.} } @conference { ISI:000367082904073, title = {SOCIAL MEDIA AND EDUCATION: FRIENDS OR FOES?}, booktitle = {ICERI2014: 7TH INTERNATIONAL CONFERENCE OF EDUCATION, RESEARCH AND INNOVATION}, series = {ICERI Proceedings}, year = {2014}, note = {7th International Conference of Education, Research and Innovation (ICERI), Seville, SPAIN, NOV 17-19, 2014}, pages = {4410-4414}, abstract = {Though being relatively a recent addition to the internet landscape, social media have witnessed an exponential growth in the last few years. These are used today in every aspect of our personal and professional lives and are infiltrating nearly every field, including education. In fact, students all over the world are greatly immersed in these web-based applications and have recourse to them to interact with friends, play games and watch television and most importantly to learn. Given the increasing use of these technologies for educational purposes, many educators have started questioning the validity and usefulness of such tools in education, stating that they may distract students and disengage them from learning. The objective of this paper is, therefore, to evaluate the use of these social online platforms in education. To this end, a survey was conducted to examine the perceptions and attitudes of higher education students towards using these tools for learning purposes. Results of the research study revealed that 49\% of the informants devote more than 40\% of the time they spend on social media to enhance their learning experience. Concerning the students{\textquoteright} contributions on these social online platforms, it has been noticed that the great majority of the participants in the survey are active members in these networks as they tend to share learning materials and resources. Taking these findings into account, we may claim that social media applications present many educational advantages for students, hence, contribute in providing opportunities for further learning. As such, they provide educators with an opportunity to go beyond traditional delivery formats and design student-centered customized learning environments.}, isbn = {978-84-617-2484-0}, issn = {2340-1095}, author = {Faizi, Rdouan and El Afia, Abdellatif and Chiheb, Raddouane}, editor = {Chova, LG and Martinez, AL and Torres, IC} } @article {Amazal2014, title = {Software development effort estimation using classical and fuzzy analogy: A cross-validation comparative study}, journal = {International Journal of Computational Intelligence and Applications}, volume = {13}, number = {3}, year = {2014}, note = {cited By 4}, abstract = {Software effort estimation is one of the most important tasks in software project management. Of several techniques suggested for estimating software development effort, the analogy-based reasoning, or Case-Based Reasoning (CBR), approaches stand out as promising techniques. 
In this paper, the benefits of using linguistic rather than numerical values in the analogy process for software effort estimation are investigated. The performance, in terms of accuracy and tolerance of imprecision, of two analogy-based software effort estimation models (Classical Analogy and Fuzzy Analogy, which use numerical and linguistic values respectively to describe software projects) is compared. Three research questions related to the performance of these two models are discussed and answered. This study uses the International Software Benchmarking Standards Group (ISBSG) dataset and confirms the usefulness of using linguistic instead of numerical values in analogy-based software effort estimation models. {\textcopyright} Imperial College Press.}, doi = {10.1142/S1469026814500138}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908567208\&doi=10.1142\%2fS1469026814500138\&partnerID=40\&md5=7cc9c1aeaf80c1739df9a20546ac1885}, author = {Amazal, F.A.a and Idri, A.a and Abran, A.b} } @conference {Ammor2014646, title = {A support model for tutoring to facilitate and automate tutoring interventions}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2014}, note = {cited By 1}, pages = {646-651}, abstract = {Several approaches have been used to improve online learning systems and make training more attractive and beneficial for learners. One prominent approach is e-Tutoring, which can provide personalized and continuous support aiming to increase learners{\textquoteright} motivation, reduce feelings of isolation, and avoid failure and abandonment in training. In our work, we offer a support model for tutoring that supports the tutor{\textquoteright}s actions; this model collects and organizes the majority of tutoring interventions to be exploited by a tutor agent in order to (1) facilitate the work of the human tutor in reusing recorded interventions and in choosing those most suitable to the learning context and the learner{\textquoteright}s learning profile, and (2) automate certain procedures to relieve the human tutor of repetitive tasks, simple but tedious, and to assist him in coping with the large number of learners requiring personalized support. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICMCS.2014.6911202}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928715410\&doi=10.1109\%2fICMCS.2014.6911202\&partnerID=40\&md5=df16d9a89e964ca8412394ddb9ff85de}, author = {Ammor, F.-Z.a and Bouzidi, D.b and Elomri, A.a} } @conference { ISI:000366999600115, title = {A Support Model for Tutoring to Facilitate and Automate Tutoring Interventions}, booktitle = {2014 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2014}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Marrakech, MOROCCO, APR 14-16, 2014}, pages = {652-657}, abstract = {Several approaches have been used to improve online learning systems and make training more attractive and beneficial for learners. One prominent approach is e-Tutoring, which can provide personalized and continuous support aiming to increase learners{\textquoteright} motivation, reduce feelings of isolation, and avoid failure and abandonment in training.
In our work, we offer a support model for tutoring that supports the tutor{\textquoteright}s actions; this model collects and organizes the majority of tutoring interventions to be exploited by a tutor agent in order to (1) facilitate the work of the human tutor in reusing recorded interventions and in choosing those most suitable to the learning context and the learner{\textquoteright}s learning profile, and (2) automate certain procedures to relieve the human tutor of repetitive tasks, simple but tedious, and to assist him in coping with the large number of learners requiring personalized support.}, isbn = {978-1-4799-3824-7}, author = {Fatima-Zahra, Ammor and Driss, Bouzidi and Amina, Elomri} } @article {Benamar2014197, title = {Time to collision (TTC) estimation maps based on optical flow in paracatadioptric vision [Estimation des cartes du temps de collision (TTC) bas{\'e}e sur le flot optique en vision para-catadioptrique]}, journal = {Traitement du Signal}, volume = {31}, number = {1-2}, year = {2014}, note = {cited By 0}, pages = {197-219}, abstract = {The present paper deals with time to collision (TTC) for a mobile robot equipped with a catadioptric camera. This type of camera is very useful in robotics because it provides a panoramic field of view. The time to collision has been extensively studied in the case of perspective cameras. Nevertheless, the methods used are not directly applicable due to strong distortions in the images produced by the omnidirectional camera and, therefore, need to be adapted. In this work, we propose to compute the TTC thanks to the estimation of the optical flow on omnidirectional images between the robot and the obstacle. We see that the double projection of a 3D point on the mirror and then on the camera plane leads to a new formulation of the TTC for catadioptric cameras. This formulation enables us to know, at each moment and at each pixel of the image, the TTC from the optical flow at this point. Our approach is validated on synthetic data and real experiments. Finally, we show that this calculation can detect obstacles in the motion direction of the robot. {\textcopyright} 2014 Lavoisier.}, doi = {10.3166/ts.31.197-219}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84930250224\&doi=10.3166\%2fts.31.197-219\&partnerID=40\&md5=1ed78d09cceb5f059e5db87be3aa812f}, author = {Benamar, F.Z.a d and Fkihi, S.E.b and Demonceaux, C.c and Mouaddib, E.M.d and Aboutajdine, D.a} } @article {Hamlaoui2014132, title = {Towards a framework for heterogeneous models matching}, journal = {Journal of Software Engineering}, volume = {8}, number = {3}, year = {2014}, note = {cited By 2}, pages = {132-151}, abstract = {The overall goal of our approach is to relate models of a given domain that are created by different actors and are thus generally heterogeneous, that is, described in different DSLs (Domain Specific Languages). Instead of building a single global model, we propose to organize the different source models as a network of models, which provides a global view of the system through a virtual global model. The matching of these models is done in a shared model of correspondences. We focus in this study on the elaboration of the model of correspondences, through a transformation called "refine". The approach is illustrated by a representative use case (a Bug Tracking System) and supported by a modeling tool called HMS (Heterogeneous Matching Suite).
{\textcopyright} 2014 Academic Journals Inc.}, doi = {10.3923/jse.2014.132.151}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84899886336\&doi=10.3923\%2fjse.2014.132.151\&partnerID=40\&md5=2803984c51fbf1cde93310f1e0f01e33}, author = {Hamlaoui, M.E.a b and Ebersold, S.a and Anwar, A.c and Coulette, B.a and Nassar, M.b} } @conference {Khtira2014152, title = {Towards a requirement-based approach to support early decisions in software product line engineering}, booktitle = {2014 2nd World Conference on Complex Systems, WCCS 2014}, year = {2014}, note = {cited By 0}, pages = {152-157}, abstract = {During the creation of a software product, the stakeholders have to take a lot of decisions. These decisions are related to the product scope, requirements priority, architecture assumptions, technological issues, etc. In the domain of Software Product Lines, many studies have dealt with decisions taken during the lifetime of the product, but a little attention has been given to the pre-project decisions. The aim of this paper is to propose a requirement-based framework that capitalizes on the specific products already derived from the product line, in order to support the benchmarking of the possible solutions that respond to a customer{\textquoteright}s needs, and to help the stakeholders of a product take decisions concerning the scope of the product and the requirements to implement. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICoCS.2014.7060993}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988286512\&doi=10.1109\%2fICoCS.2014.7060993\&partnerID=40\&md5=d227ca52b5793a11500365eb9248052c}, author = {Khtira, A. and Benlarabi, A. and El Asri, B.} } @conference { ISI:000380387700116, title = {Towards a Requirement-Based Approach to Support Early Decisions in Software Product Line Engineering}, booktitle = {2014 SECOND WORLD CONFERENCE ON COMPLEX SYSTEMS (WCCS)}, year = {2014}, note = {2014 Second World Conference on Complex Systems (WCCS), Agadir, MOROCCO, NOV 10-12, 2014}, pages = {152-157}, publisher = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, organization = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, abstract = {During the creation of a software product, the stakeholders have to take a lot of decisions. These decisions are related to the product scope, requirements priority, architecture assumptions, technological issues, etc. In the domain of Software Product Lines, many studies have dealt with decisions taken during the lifetime of the product, but a little attention has been given to the pre-project decisions. 
The aim of this paper is to propose a requirement-based framework that capitalizes on the specific products already derived from the product line, in order to support the benchmarking of the possible solutions that respond to a customer{\textquoteright}s needs, and to help the stakeholders of a product take decisions concerning the scope of the product and the requirements to implement.}, isbn = {978-1-4799-4647-1}, author = {Khtira, Amal and Benlarabi, Anissa and El Asri, Bouchra} } @conference { ISI:000371484600001, title = {Towards a semantic enrichment of configurable process models}, booktitle = {2014 THIRD IEEE INTERNATIONAL COLLOQUIUM IN INFORMATION SCIENCE AND TECHNOLOGY (CIST{\textquoteright}14)}, series = {Colloquium in Information Science and Technology}, year = {2014}, note = {3rd IEEE International Colloquium on Information Science and Technology (CIST), Tetouan, MOROCCO, OCT 20-22, 2014}, pages = {1-6}, publisher = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, organization = {IEEE; IEEE Comp Soc; IEEE Commun Soc; IEEE Morocco Sect; IEEE Morocco Comp \& Commun Joint Chapter; UAE IEEE Student Branch; Univ Abdelmalek Essaadi; ENSA Tetouan}, abstract = {In recent years, the concept of Semantic business process management has attracted significant research work, with the emergence of semantic web technologies (SW), which aimed to describe resources in a formal representation with clearly defined semantics in order to be machine-readable. Enriching business process with semantics improves the representation of processes and permits the automation of different tasks such as modeling, configuration, evolution, and promotes more flexible and adaptive solutions according to business requirement changes. Nowadays, the key concept of reuse in Business Process Management is the Configurable Process Model (CPM) solution which is an important approach for reusing and customizing business process models with a set of process variants. However, existing approaches in CPM lack of semantic representation. In this paper, we propose an ontology-based approach for enriching Configurable Process Models with semantics, as a first step towards the enhancement of the CPM evolution process.}, isbn = {978-1-4799-5979-2}, issn = {2327-185X}, author = {El Faquih, Loubna and Sbai, Hanae and Fredj, Mounia}, editor = {ElMohajir, M and AlAchhab, M and Chahhou, M} } @conference {Hammani2014146, title = {Towards a variable non-functional requirements integration for component-based product line: A generic approach}, booktitle = {2014 2nd World Conference on Complex Systems, WCCS 2014}, year = {2014}, note = {cited By 0}, pages = {146-151}, abstract = {Software Product Line and Component-Based Software Engineering are two expanding paradigms for developing critical and complex systems. Therefore, these two paradigms seem have complementary strengths. So, significant benefits can be gained from their integration. Despite the fact that Non-Functional Requirements (such as security, performance and reliability) are critical to software systems, and they must be taken into account at early stage of software development life cycle and should be explicitly specified as well as functional requirements; these NFR are overlooked in Software Product line and Component-Based Software Development Processes. 
So, our main objective is to discuss some recent approaches in the field and to propose a new extension to Software Product Line processes that supports Component-Based Approaches and integrates Non-Functional Requirements in the domain requirements stage. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICoCS.2014.7060992}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988286060\&doi=10.1109\%2fICoCS.2014.7060992\&partnerID=40\&md5=3c43beb6d08dd3ee03dc69958f9d5ee7}, author = {Hammani, F.Z. and Rhanoui, M. and El Asri, B.} } @conference { ISI:000380387700115, title = {Towards a Variable Non-Functional Requirements Integration for Component-Based Product Line}, booktitle = {2014 SECOND WORLD CONFERENCE ON COMPLEX SYSTEMS (WCCS)}, year = {2014}, note = {2014 Second World Conference on Complex Systems (WCCS), Agadir, MOROCCO, NOV 10-12, 2014}, pages = {146-151}, publisher = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, organization = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, abstract = {Software Product Line and Component-Based Software Engineering are two expanding paradigms for developing critical and complex systems. These two paradigms seem to have complementary strengths, so significant benefits can be gained from their integration. Despite the fact that Non-Functional Requirements (such as security, performance and reliability) are critical to software systems, must be taken into account at an early stage of the software development life cycle, and should be explicitly specified as well as functional requirements, these NFR are overlooked in Software Product Line and Component-Based Software Development processes. So, our main objective is to discuss some recent approaches in the field and to propose a new extension to Software Product Line processes that supports Component-Based Approaches and integrates Non-Functional Requirements in the domain requirements stage.}, isbn = {978-1-4799-4647-1}, author = {Hammani, Fatima Zahra and Rhanoui, Maryem and El Asri, Bouchra} } @conference { ISI:000380387700080, title = {Towards Flexible and Reusable SaaS for Multi-tenancy To design, implement and bind multi-functional variability for Rich-Variant services}, booktitle = {2014 SECOND WORLD CONFERENCE ON COMPLEX SYSTEMS (WCCS)}, year = {2014}, note = {2014 Second World Conference on Complex Systems (WCCS), Agadir, MOROCCO, NOV 10-12, 2014}, pages = {164-170}, publisher = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, organization = {Ibn Zohr Univ; Moroccan Soc of Complex Syst; IEEE Morocco; Int Acad for Syst and Cybernet Sci IASCYS}, abstract = {Software-as-a-Service (SaaS) is a new delivery model that involves providing applications to the customer on demand over the Internet. SaaS promotes multi-tenancy as a tool to exploit economies of scale. However, the flexibility of an application to be adapted to individual tenant{\textquoteright}s needs is a major requirement. Thus, we introduce in this paper an approach proposing a more flexible and reusable SaaS system for multitenancy. This approach consists of integrating a functional variability using Rich-Variant Components with a deployment variability enabling the customers to choose with which other tenants they want or don{\textquoteright}t want to share instances.
The approach presented enables exploiting the economies of scale while avoiding the problem of customers{\textquoteright} hesitation about sharing with other tenants.}, isbn = {978-1-4799-4647-1}, author = {Kriouile, Houda and El Asri, Bouchra and El Haloui, M{\textquoteright}barek} } @conference {Laghouaouta2014, title = {On the use of graph transformations for model composition traceability}, booktitle = {Proceedings - International Conference on Research Challenges in Information Science}, year = {2014}, note = {cited By 1}, abstract = {The model composition provides support to build systems based on a set of less complex sub-models. This operation allows managing complexity while supporting the modularity and reusability tasks. Due to the increase number of the involving models, their composition becomes a tedious task. For that, the need for maintaining traceability information is raised to help managing the composition operation. We propose in this work a graph-based model transformations approach, which aims to keep track of the model composition operation. Our objective is to capture traces in an automatic and reusable manner. Finally, a composition scenario is given to demonstrate the feasibility of our proposal. {\textcopyright} 2014 IEEE.}, doi = {10.1109/RCIS.2014.6861075}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906762143\&doi=10.1109\%2fRCIS.2014.6861075\&partnerID=40\&md5=294fe5676734e151d6e8764405acb765}, author = {Laghouaouta, Y.a and Nassar, M.b and Anwar, A.c and Bruel, J.-M.d} } @conference { ISI:000346582400007, title = {Using Markov Decision Processes to Solve Stochastic Gate Assignment Problem}, booktitle = {PROCEEDINGS OF 2014 2ND IEEE INTERNATIONAL CONFERENCE ON LOGISTICS AND OPERATIONS MANAGEMENT (GOL 2014)}, year = {2014}, note = {2nd IEEE International Conference on Logistics Operations Management (GOL), Rabat, MOROCCO, JUN 05-07, 2014}, pages = {42-47}, publisher = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, organization = {IEEE; Mohammed VI Souissi Univ, ENSIAS Sch; Univ Le Havre; Sidi Mohamed Ben Abdellah Univ, FST}, abstract = {Airport traffic often undergoes some random disruptions, which have to be considered to ensure operational flight-gate assignments. Inadequate assignment of gates may result in flight delays that happen in airport operations and must be taken into account. Here, we are using the original algorithm, based on Markov decision process (MDP) to solve the gate assignment problem (GAP) under uncertainty; we include stochastic parameters that depend on probabilities to express fluctuations in flight operations. The use of MDP for modeling will provide for airport controllers a robust solution for the GAP that takes in consideration possible flight delays. This paper gives the corresponding model, which includes strict constraints of the GAP and other soft constraints like choice preferences of gates. 
We give experimental results on a sample of real data to demonstrate the feasibility and efficiency of our approach.}, isbn = {978-1-4799-4650-1}, author = {Aoun, Oussama and El Afia, Abdellatif}, editor = {Benadada, Y} } @conference {Aoun201442, title = {Using Markov decision processes to solve stochastic gate assignment problem}, booktitle = {Proceedings of 2nd IEEE International Conference on Logistics Operations Management, GOL 2014}, year = {2014}, note = {cited By 0}, pages = {42-47}, abstract = {Airport traffic often undergoes some random disruptions, which have to be considered to ensure operational flight-gate assignments. Inadequate assignment of gates may result in flight delays that happen in airport operations and must be taken into account. Here, we are using the original algorithm, based on Markov decision process (MDP) to solve the gate assignment problem (GAP) under uncertainty; we include stochastic parameters that depend on probabilities to express fluctuations in flight operations. The use of MDP for modeling will provide for airport controllers a robust solution for the GAP that takes in consideration possible flight delays. This paper gives the corresponding model, which includes strict constraints of the GAP and other soft constraints like choice preferences of gates. We give experimental results on a sample of real data to demonstrate the feasibility and efficiency of our approach. {\textcopyright} 2014 IEEE.}, doi = {10.1109/GOL.2014.6887445}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908638693\&doi=10.1109\%2fGOL.2014.6887445\&partnerID=40\&md5=5ca43e35806c14816feb2afcffb9c9c8}, author = {Aoun, O. and El Afia, A.} } @conference {Idri2013483, title = {Assessing RBFN-based software cost estimation models}, booktitle = {Proceedings of the International Conference on Software Engineering and Knowledge Engineering, SEKE}, volume = {2013-January}, number = {January}, year = {2013}, note = {cited By 2}, pages = {483-487}, abstract = {This paper is concerned with the design of the neural networks approach, especially Radial Basis Function Neural (RBFN) networks, for software effort estimation models. The study firstly focuses on the construction of the RBFN middle layer composed of receptive fields, using two clustering techniques: hard C-means and fuzzy C-means. Thereafter, we evaluate and compare the performance of effort estimation models that use an RBFN construction-based either on hard or fuzzy C-means. This study uses the ISBSG dataset and confirms the usefulness of an RBFN-based on fuzzy C-means for software effort estimation. Copyright {\textcopyright} 2013 by Knowledge Systems Institute Graduate School.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84937716255\&partnerID=40\&md5=26b8de65f7c08367f5351ab898bf97a8}, author = {Idri, A.a and Hassani, A.a and Abran, A.b} } @conference {Alfath2013, title = {Cloud computing security: Fine-grained analysis and security approaches}, booktitle = {2013 National Security Days - 3eme Edition Des Journees Nationales de Securite, JNS3}, year = {2013}, note = {cited By 2}, abstract = {Cloud computing is a new paradigm for hosting and delivering services over the internet. In this paper, we aim to pinpoint the challenges and issues related to the Cloud computing security and also to discuss some promising approaches to encounter these concerns. Despite the potential benefits that cloud computing offers, the model security is not mature yet. 
Indeed, security is considered as the major obstacle to faster and more widespread adoption of cloud computing. In the scope of this paper, we will emphasize the impact of the cloud computing characteristics and properties such as Multitenancy and Elasticity on the security model. We will provide detailed analysis of the risks associated with these properties and also with the Virtualization which is a cornerstone of some cloud implementations. A second part of this paper will be dedicated to discuss some promising approaches to overcome the security issues in the cloud paradigm. {\textcopyright} 2013 IEEE.}, doi = {10.1109/JNS3.2013.6595465}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84884810025\&doi=10.1109\%2fJNS3.2013.6595465\&partnerID=40\&md5=2101125789fcc3882958f80933d3acce}, author = {Alfath, A. and Baina, K. and Baina, S.} } @conference { ISI:000327787500010, title = {Cloud Computing Security: Fine-grained analysis and Security approaches}, booktitle = {2013 NATIONAL SECURITY DAYS (JNS3)}, year = {2013}, note = {3rd National Security Days (JNS), Mohammed V Souissi Univ, Rabat, MOROCCO, APR 26-27, 2013}, publisher = {Assoc Marocaine Confiance Numerique; Ecole Nationale Superieure Informatique \& Analyse Syst; Informat Secur Res Team; IEEE Morocco Sect; Bank Al Maghrib; Natl Ctr Sci \& Technol Res}, organization = {Assoc Marocaine Confiance Numerique; Ecole Nationale Superieure Informatique \& Analyse Syst; Informat Secur Res Team; IEEE Morocco Sect; Bank Al Maghrib; Natl Ctr Sci \& Technol Res}, abstract = {Cloud computing is a new paradigm for hosting and delivering services over the internet. In this paper, we aim to pinpoint the challenges and issues related to the Cloud computing security and also to discuss some promising approaches to encounter these concerns. Despite the potential benefits that cloud computing offers, the model security is not mature yet. Indeed, security is considered as the major obstacle to faster and more widespread adoption of cloud computing. In the scope of this paper, we will emphasize the impact of the cloud computing characteristics and properties such as Multitenancy and Elasticity on the security model. We will provide detailed analysis of the risks associated with these properties and also with the Virtualization which is a cornerstone of some cloud implementations. A second part of this paper will be dedicated to discuss some promising approaches to overcome the security issues in the cloud paradigm.}, isbn = {978-1-4799-0324-5}, author = {Alfath, Abdeladim and Baina, Karim and Baina, Salah} } @conference { ISI:000326538300126, title = {Combined optimization of shipping and storage costs in a multi-product and multi-level supply chain, under a stochastic demand}, booktitle = {2013 5TH INTERNATIONAL CONFERENCE ON MODELING, SIMULATION AND APPLIED OPTIMIZATION (ICMSAO)}, year = {2013}, note = {5th International Conference on Modeling, Simulation and Applied Optimization (ICMSAO), Hammamet, TUNISIA, APR 28-30, 2013}, abstract = {The increasing need for optimality in the presence of uncertainty motivates the development and application of Model Predictive Control. In this paper we apply the Stochastic Model Predictive Control to optimize the cost of storage and transport for a multi-product and a multi-level supply chain under a stochastic demand.
We use the dynamic programming to find the control policies resolving the problem.}, isbn = {978-1-4673-5814-9; 978-1-4673-5812-5}, author = {Tikito, Kawtar and Achchab, Said and Benadada, Youssef} } @conference {Tikito2013, title = {Combined optimization of shipping and storage costs in a multi-product and multi-level supply chain, under a stochastic demand}, booktitle = {2013 5th International Conference on Modeling, Simulation and Applied Optimization, ICMSAO 2013}, year = {2013}, note = {cited By 0}, abstract = {The increasing need for optimality in the presence of uncertainty motivates the development and application of Model Predictive Control. In this paper we apply the Stochastic Model Predictive Control to optimize the cost of storage and transport for a multi-product and a multi-level supply chain under a stochastic demand. We use the dynamic programming to find the control policies resolving the problem. {\textcopyright} 2013 IEEE.}, doi = {10.1109/ICMSAO.2013.6552666}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84881436213\&doi=10.1109\%2fICMSAO.2013.6552666\&partnerID=40\&md5=488c37ee005c9d8928f4d1d35738cbe3}, author = {Tikito, K. and Achchab, S. and Benadada, Y.} } @article {Aouinatou2013171, title = {On the conventional public key in identification-based encryption: The case of RSA}, journal = {International Journal of Information and Computer Security}, volume = {5}, number = {3}, year = {2013}, note = {cited By 1}, pages = {171-201}, abstract = {This paper rely on the traditional method RSA-OAEP with an identification-based encryption. Firstly, we make a little change in the basic idea (Boneh et al., 2002; Ding and Tsudik, 2003) in order to create a scheme with which we can sign and crypt at same time. A simple comparison prove that our signature is equivalent to the signature of Shamir. Then, we propose a new approach, which conserve properly the RSA classic. In this paper we will also respond to the question of Xuhua Ding and Gene Tsudik, to propose a convenient exponent for an RSA-IBE. {\textcopyright} 2013 Inderscience Enterprises Ltd.}, doi = {10.1504/IJICS.2013.055837}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84882653997\&doi=10.1504\%2fIJICS.2013.055837\&partnerID=40\&md5=9f48d3394236ca7149104e451bdb8230}, author = {Aouinatou, R.a and Belkasmi, M.b} } @conference {Ouahed201333, title = {A discovery service for automatic composition of web services oriented-agent}, booktitle = {Proceedings of the Workshop on Enabling Technologies: Infrastructure for Collaborative Enterprises, WETICE}, year = {2013}, note = {cited By 1}, pages = {33-35}, abstract = {In this paper we present an approach for automatic web service composition. The process of composition considered includes two layers; the discovery layer which aims to generate a composition schema, and the execution layer which uses a multi-agent system to select web services and execute the composite service. The main focus of this work is on the discovery layer where we propose an approach to optimize the search of the composite service. Firstly, a dependency graph is created by matching the I/O parameters of request and all available web services in register. Then a backward search is performed on this graph to choose the composition schema. The principle used for choosing services is based on the proportion of input and output parameters. Services with the highest proportion are preferred, allowing us to minimize the number of services participating in the solution. 
Experimentation and performance evaluation of the suggested approach are given at the end of this paper. {\textcopyright} 2013 IEEE.}, doi = {10.1109/WETICE.2013.16}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883530751\&doi=10.1109\%2fWETICE.2013.16\&partnerID=40\&md5=58ccbaef94e0989981dec058cdee4e71}, author = {Ouahed, A.K.E.a and Erradi, M.b and Azzoune, H.c} } @conference {Alrajeh2013284, title = {Enterprise architecture based assessment of healthcare information system strategic alignment}, booktitle = {HEALTHINF 2013 - Proceedings of the International Conference on Health Informatics}, year = {2013}, note = {cited By 0}, pages = {284-289}, abstract = {Traditionally, most Information Technology (IT) implementations in healthcare are concerned primarily with improving the efficiency of operational tasks, without considering the effectiveness of the strategic management decision processes. This paper objective aims at filling this lack by presenting a new approach for Assessment of Healthcare Information Systems (HIS) Strategic Alignment. The underlined concepts are based on Enterprise Architecture (EA) related concepts, providing a clear and comprehensive view of the structure and operations of the healthcare system. This paper focus on how to carry out an internal EA analysis that aims at measuring HIS alignment via a set of metrics determining if the business processes, sub-processes, applications and databases are actually achieving their purpose. This will enable all parties involved in the HIS management process to stay abreast of what has been really attained, which goals are being met, and what needs to be changed.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84877967927\&partnerID=40\&md5=f7caf4bf2dbaad611f6cafb51153181a}, author = {Alrajeh, N.a and Elhari, K.b and Bounabat, B.b} } @conference { ISI:000350287800005, title = {ESB Based Communication In the Connectivist Learning Environment -CLE-}, booktitle = {2013 3RD INTERNATIONAL SYMPOSIUM ISKO-MAGHREB}, year = {2013}, note = {ISKO-Maghreb 3rd International Symposium, Marrakech, MOROCCO, NOV 08-09, 2013}, publisher = {ISKO}, organization = {ISKO}, abstract = {CLE {[}1] {\textquoteleft}{\textquoteleft}Connectivist Learning Environment{{\textquoteright}{\textquoteright}} is a clouded and ubiquitous learning environment conceived by our research team LeRMA {\textquoteleft}{\textquoteleft}Learning and Research in Mobile Age{{\textquoteright}{\textquoteright}}. This environment adopts the connectivism as a pedagogical approach and aims to construct knowledge through collaboration and communication between heterogeneous communities composed of humans and smart objects. The objective of our work is to provide for our ubiquitous environment CLE a service-based communication (where each actor in the environment is represented by a set of services). As an implementation, we opted to use an ESB {\textquoteleft}{\textquoteleft}Enterprise Service Bus{{\textquoteright}{\textquoteright}} in the SOA layer of CLE. This ESB includes the actors{\textquoteright} services, business services, SLA services for a better quality of service and the bus will be used for connectivity, routing, processing and conversion. 
Through this proposition, we ensure a better communication between nodes (humans, smart objects and controller unit) and an efficient knowledge construction.}, isbn = {978-1-4799-3392-1}, author = {Belahcen, Anas and Abik, Mounia and Ajhoun, Rachida} } @conference {Belarcen2013, title = {ESB based communication in the connectivist learning environment - CLE}, booktitle = {2013 3rd International Symposium ISKO-Maghreb}, year = {2013}, note = {cited By 0}, abstract = {CLE [1] {\guillemotleft}Connectivist Learning Environment{\guillemotright} is a clouded and ubiquitous learning environment conceived by our research team LeRMA {\guillemotleft}Learning and Research in Mobile Age{\guillemotright}. This environment adopts the connectivism as a pedagogical approach and aims to construct knowledge through collaboration and communication between heterogeneous communities composed of humans and smart objects. The objective of our work is to provide for our ubiquitous environment CLE a service-based communication (where each actor in the environment is represented by a set of services). As an implementation, we opted to use an ESB {\guillemotleft}Enterprise Service Bus{\guillemotright} in the SOA layer of CLE. This ESB includes the actors{\textquoteright} services, business services, SLA services for a better quality of service and the bus will be used for connectivity, routing, processing and conversion. Through this proposition, we ensure a better communication between nodes (humans, smart objects and controller unit) and an efficient knowledge construction. {\textcopyright} 2013 IEEE.}, doi = {10.1109/ISKO-Maghreb.2013.6728110}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84894197248\&doi=10.1109\%2fISKO-Maghreb.2013.6728110\&partnerID=40\&md5=a6cf6372bc7f3917f7874bbb0e36fbe0}, author = {Belahcen, A. and Abik, M. and Ajhoun, R.} } @conference { ISI:000352386800113, title = {An exact method for solving the buffer sizing and inspection stations allocations problem}, booktitle = {PROCEEDINGS OF 2013 INTERNATIONAL CONFERENCE ON INDUSTRIAL ENGINEERING AND SYSTEMS MANAGEMENT (IEEE-IESM 2013)}, year = {2013}, note = {5th International Conference on Industrial Engineering and Systems Management (IEEE IESM), Mohammadia Sch Engn, Rabat, MOROCCO, OCT 28-30, 2013}, pages = {775-780}, publisher = {IEEE; IEEE Morocco Sect; I4e2; Univ Valenciennes Hannaut Cambresis; Univ Mohammed V Agdal Rabat; CNRST; CISIT; ENIM; ESITH; GDR MACS; LAMIH CNRS; IFSTTAR; IRT RAILENIUM; TEMPO}, organization = {IEEE; IEEE Morocco Sect; I4e2; Univ Valenciennes Hannaut Cambresis; Univ Mohammed V Agdal Rabat; CNRST; CISIT; ENIM; ESITH; GDR MACS; LAMIH CNRS; IFSTTAR; IRT RAILENIUM; TEMPO}, abstract = {An unreliable single part type transfer line with fixed inter machine buffer sizes is considered. In general, imperfect machines operating with imperfect raw material, or partially processed raw material, will result in the production of a mix of conforming and non conforming parts. The problem of optimal joint assignment of buffer sizes and inspection station positions is here considered where we assume that defective parts are scrapped upon detection. The performance measure to be optimized is a combination of work in process storage and parts inspection costs, with an eye to determining the adequate number of inspection stations. We propose an exact method for solving this problem and large-scale numerical experiments are provided to demonstrate the efficiency of the proposed algorithm.
Interesting properties of the problem are also denoted based on our empirical results.}, isbn = {978-2-9600532-4-1}, author = {Ouzineb, Mohammed and Mhada, Fatima and El Hallaoui, Issmail and Pellerin, Robert}, editor = {Aboutajdine, D and Skalli, A and Benchekroun, B and Artiba, A} } @article {9171244020131001, title = {Exploring the Potential Benefits of Using Social Media in Education.}, journal = {International Journal of Engineering Pedagogy}, volume = {3}, number = {4}, year = {2013}, pages = {50 - 53}, abstract = {The objective of this work is to investigate the potential benefits of using social media in education. A thorough examination of a large set of these online tools has revealed that social media have many educational advantages. In fact, it has been found out that these web-based applications can improve communication among students and between teachers and students. Thanks to these technologies, both teachers and students can interact with each other in a matter of seconds. Social media can also be used to promote students{\textquoteright} engagement. Students who often complain of being intimidated or bored in the classroom may feel comfortable to express their creativity and voice their opinion on a social network website. Another finding of this study is that social media applications foster collaboration as they allow students to work together to achieve a common goal. Given these educational benefits, we recommend that these online social tools should be used in learning environments. [ABSTRACT}, keywords = {Benefits, collaboration, communication, Computer assisted instruction, education, engagement, Internet in education, Online social networks, social media, Social networks, Teaching aids \& devices}, issn = {21924880}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=91712440\&site=ehost-live}, author = {Faizi, Rdouan and El Afia, Abdellatif and Chiheb, Raddouane} } @conference {Benhaddi2013481, title = {Formalization of the user centric SOA approach: Implementation and end user satisfaction evaluation}, booktitle = {ICEIS 2013 - Proceedings of the 15th International Conference on Enterprise Information Systems}, volume = {2}, year = {2013}, note = {cited By 0}, pages = {481-488}, abstract = {User-centric SOA is a new paradigm allowing unskilled end users to compose services to create new one. Mashups represent new agile and quick ways to compose and integrate structured and unstructured resources, from different types existing on the web. Mashups emerged as a new way to democratize the SOA and realize the user-centric SOA; However, Mashups are emerging applications, and thus consist of immature, non intuitive and non formalized area. In this paper, we formalize the user-centric SOA development by proposing a new cloud-based architecture for user-centric SOA platforms, and by introducing a new rich integration language based on the advanced Enterprise Integration Patterns (EIPS). We also propose a new intuitive and self-explanatory semantic methodology and interaction model for end users services integration. 
Through these contributions, we give the promise of realizing the user-centric SOA.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84887701912\&partnerID=40\&md5=714ea5a40434e19b7ab20fc17a7e8708}, author = {Benhaddi, M.a and Ba{\"\i}na, K.b and Abdelwahed, E.H.a} } @conference { ISI:000386611100055, title = {Formalization of the User Centric SOA Approach Implementation and End User Satisfaction Evaluation}, booktitle = {ICEIS: PROCEEDINGS OF THE 15TH INTERNATIONAL CONFERENCE ON ENTERPRISE INFORMATION SYSTEMS - VOL 2}, year = {2013}, note = {15th International Conference on Enterprise Information Systems (ICEIS), Angers, FRANCE, JUL 04-07, 2013}, pages = {481-488}, publisher = {ESEO Grp; Inst Syst \& Technologies Informat, Control \& Commun; Assoc Advancement Artificial Intelligence; IEICE Special Interest Grp Software Interprise Modelling; ACM Special Interest Grp Management Informat Syst; ACM Special Interest Grp Comp Human Inte}, organization = {ESEO Grp; Inst Syst \& Technologies Informat, Control \& Commun; Assoc Advancement Artificial Intelligence; IEICE Special Interest Grp Software Interprise Modelling; ACM Special Interest Grp Management Informat Syst; ACM Special Interest Grp Comp Human Inte}, abstract = {User-centric SOA is a new paradigm allowing unskilled end users to compose services to create new one. Mashups represent new agile and quick ways to compose and integrate structured and unstructured resources, from different types existing on the web. Mashups emerged as a new way to democratize the SOA and realize the user-centric SOA; However, Mashups are emerging applications, and thus consist of immature, non intuitive and non formalized area. In this paper, we formalize the user-centric SOA development by proposing a new cloud-based architecture for user-centric SOA platforms, and by introducing a new rich integration language based on the advanced Enterprise Integration Patterns (EIPS). We also propose a new intuitive and self-explanatory semantic methodology and interaction model for end users services integration. Through these contributions, we give the promise of realizing the user-centric SOA.}, isbn = {978-989-8565-60-0}, doi = {10.5220/0004446304810488}, author = {Benhaddi, Meriem and Baina, Karim and Abdelwahed, El Hassan}, editor = {Hammoudi, S and Maciaszek, L and Cordeiro, J and Dietz, J} } @conference { ISI:000351597600002, title = {GRADIENT-BASED TIME TO CONTACT ON PARACATADIOPTRIC CAMERA}, booktitle = {2013 20TH IEEE INTERNATIONAL CONFERENCE ON IMAGE PROCESSING (ICIP 2013)}, series = {IEEE International Conference on Image Processing ICIP}, year = {2013}, note = {20th IEEE International Conference on Image Processing (ICIP), Melbourne, AUSTRALIA, SEP 15-18, 2013}, pages = {5-9}, publisher = {Inst Elect \& Elect Engineers; IEEE Signal Proc Soc}, organization = {Inst Elect \& Elect Engineers; IEEE Signal Proc Soc}, abstract = {The problem of time to contact or time to collision (TTC) estimation is largely discussed in perspective images. However, a few works have dealt with images of catadioptric sensors despite of their utility in robotics applications. The objective of this paper is to develop a novel model for estimating TTC with catadioptric images relative to a planar surface, and to demonstrate that TTC can be estimated only with derivative brightness and image coordinates. 
This model, called {\textquoteleft}{\textquoteleft}gradient based time to contact{{\textquoteright}{\textquoteright}}, does not need high processing such as explicit estimation of optical flow and feature detection and/or tracking. The proposed method allows to estimate TTC and gives additional information about the orientation of planar surface. It was tested on simulated and real datasets.}, isbn = {978-1-4799-2341-0}, issn = {1522-4880}, author = {Benamar, F. and El Fkihi, S. and Demonceaux, C. and Mouaddib, E. and Aboutajdine, D.} } @conference {Benamar20135, title = {Gradient-based time to contact on paracatadioptric camera}, booktitle = {2013 IEEE International Conference on Image Processing, ICIP 2013 - Proceedings}, year = {2013}, note = {cited By 2}, pages = {5-9}, abstract = {The problem of time to contact or time to collision (TTC) estimation is largely discussed in perspective images. However, a few works have dealt with images of catadioptric sensors despite of their utility in robotics applications. The objective of this paper is to develop a novel model for estimating TTC with catadioptric images relative to a planar surface, and to demonstrate that TTC can be estimated only with derivative brightness and image coordinates. This model, called {\textquoteright}gradient based time to contact{\textquoteright}, does not need high processing such as explicit estimation of optical flow and feature detection and/or tracking. The proposed method allows to estimate TTC and gives additional information about the orientation of planar surface. It was tested on simulated and real datasets. {\textcopyright} 2013 IEEE.}, doi = {10.1109/ICIP.2013.6738002}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84897697160\&doi=10.1109\%2fICIP.2013.6738002\&partnerID=40\&md5=2d096a6f98a59c3c442f3c24ffca4bbc}, author = {Benamar, F.a d and El Fkihi, S.a b and Demonceaux, C.c and Mouaddib, E.d and Aboutajdine, D.a} } @article {Anwar20131, title = {A graphical specification of model composition with triple graph grammars}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {7706 LNCS}, year = {2013}, note = {cited By 1}, pages = {1-18}, abstract = {The concept of model is widely used in the MDE approach in order to represent several software artifacts, which are handled by different development teams throughout the software life cycle. Management of these models requires the definition of a set of model management operators such as the composition operation. This is generally an operation, which is dedicated to merge a number of models in order to create one (or more) incorporated model and which depends on the application context. The current work focuses on the description of a formal approach for model composition where the composition is specified by a set of graph transformation rules over UML-compliant models. The semantics of our composition operator is defined using triple graph grammars (TGGs) formalism. Furthermore, we present a composition scheme interpreted by a three-steps composition process: matching, checking and merging. 
{\textcopyright} 2013 Springer-Verlag.}, doi = {10.1007/978-3-642-38209-3_1}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883434558\&doi=10.1007\%2f978-3-642-38209-3_1\&partnerID=40\&md5=d6c0f27b3631bd269a86415547a1dcb1}, author = {Anwar, A.a and Benelallam, A.b and Nassar, M.b and Coulette, B.c} } @conference {ElHamlaoui2013181, title = {Heterogeneous models matching for consistency management}, booktitle = {ENASE 2013 - Proceedings of the 8th International Conference on Evaluation of Novel Approaches to Software Engineering}, year = {2013}, note = {cited By 1}, pages = {181-188}, abstract = {The overall goal of our approach is to relate models of a given domain. Those models are manipulated by different actors, and are thus generally heterogeneous, that is, described with different DSLs (Domain Specific Languages). Instead of building a single global model, we propose to organize the different source models as a network of models, which provides a global view of the system through a virtual global model. The matching of these models is done in a unique correspondence model composed of relationships that are instantiated from a correspondence meta-model. This meta-model is composed of a generic part - common to all the domains - And of a specific part which depends on the specific domain modelled. In this paper, we focus on the elaboration of the correspondence model based on a correspondence meta-model, through a vertical relationship named "refine". The approach is illustrated on a representative use case (a Bug Tracking System). Copyright {\textcopyright} 2013 SCITEPRESS.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84887115653\&partnerID=40\&md5=a72804a8195b6d08e4fbdcb715cfd079}, author = {El Hamlaoui, M.a b and Ebersold, S.a and Coulette, B.a and Anwar, A.c and Nassar, M.b} } @conference {Anter2013201, title = {The hybrid integration system Towards a new approach for creating candidate views for materialization}, booktitle = {2013 5th International Conference on Computer Science and Information Technology, CSIT 2013 - Proceedings}, year = {2013}, note = {cited By 0}, pages = {201-209}, abstract = {The vulgarization of information technologies and telecommunications has generated an enormous amount of information. This information is generally heterogeneous, stored in autonomous and distributed sources. Thus, it becomes necessary to introduce the information integration systems. These systems must ensure an optimal query response time, and the freshness of data. Using a virtual approach cannot answer these questions. On the one hand, the query response time is very important. Indeed, the mediator must access, every time, to the sources for load the relevant information. On the other hand, the sources are not always available. The establishment of a hybrid integration system, where a portion of information is materialized in the mediator and the other portion remains in the sources and are extracted at query time, is an effective solution to these problem, provided that the materialized part has carefully chosen. Based on the distribution of user queries, we present in this paper an approach to select the information most requested by users and organize it as candidate views for materialization in the mediator. {\textcopyright} 2013 IEEE.}, doi = {10.1109/CSIT.2013.6588780}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84884866157\&doi=10.1109\%2fCSIT.2013.6588780\&partnerID=40\&md5=901ef3c27b658be16f920570e0dd9cce}, author = {Anter, S. 
and Zellou, A. and Idri, A.} } @conference { ISI:000332960400032, title = {The hybrid integration system Towards a new approach for creating candidate views for materialization}, booktitle = {2013 5TH INTERNATIONAL CONFERENCE ON COMPUTER SCIENCE AND INFORMATION TECHNOLOGY (CSIT)}, series = {International Conference on Computer Science and Information Technology}, year = {2013}, note = {5th International Conference on Computer Science and Information Technology (CSIT), Appl Sci Private Univ, Amman, JORDAN, MAR 27-28, 2013}, pages = {201-209}, publisher = {Appl Sci Private Univ, Fac Informat Technol}, organization = {Appl Sci Private Univ, Fac Informat Technol}, abstract = {The vulgarization of information technologies and telecommunications has generated an enormous amount of information. This information is generally heterogeneous, stored in autonomous and distributed sources. Thus, it becomes necessary to introduce the information integration systems. These systems must ensure an optimal query response time, and the freshness of data. Using a virtual approach cannot answer these questions. On the one hand, the query response time is very important. Indeed, the mediator must access, every time, to the sources for load the relevant information. On the other hand, the sources are not always available. The establishment of a hybrid integration system, where a portion of information is materialized in the mediator and the other portion remains in the sources and are extracted at query time, is an effective solution to these problem, provided that the materialized part has carefully chosen. Based on the distribution of user queries, we present in this paper an approach to select the information most requested by users and organize it as candidate views for materialization in the mediator.}, isbn = {978-1-4673-5825-5}, issn = {2381-3458}, author = {Anter, Samir and Zellou, Ahmed and Idri, Ali} } @conference {Allouch2013112, title = {Improvement of QoS in IMS network using network coding}, booktitle = {ACM International Conference Proceeding Series}, year = {2013}, note = {cited By 0}, pages = {112-117}, abstract = {Network coding provides a powerful and effective mechanism, instead of sending packets directly on the channel, the end node is allowed to combine and encode one or more packages before sending them. This process improves the speed of the performance, the rate of the transmission. In this paper, we present a survey of variants of network coding; we present the advantage of network coding in network convergence to IP Multimedia Subsystem (IMS), the classification of network codes, with their advantage. We show by analyzing the strategy of Network Coding, and the performance of the implementation of network coding in terms of messaging and media streaming in the generation IP Multimedia Subsystem network (IMS). We conclude that network coding can improve throughput, minimize transmission delay and minimize energy consumption in the networks of future generations IMS. Following the Network Coding can offer exciting possibilities for efficient transmission and helps raise the growing challenges of different types of applications on the Internet and media convergence based on IP IMS network. {\textcopyright} 2013 ACM.}, doi = {10.1145/2536853.2536918}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84897660656\&doi=10.1145\%2f2536853.2536918\&partnerID=40\&md5=6468954e6cbb6fc58e9e71808c10f37a}, author = {Allouch, H. 
and Belkasmi, M.} } @conference {Belahcen2013, title = {Knowledge construction in the Connectivist Learning Environment - CLE}, booktitle = {2013 12th International Conference on Information Technology Based Higher Education and Training, ITHET 2013}, year = {2013}, note = {cited By 0}, abstract = {Technological advances have brought great changes in all areas, including education. The distance learning evolution (D-learning, E-learning and M-Learning) has prompted the adoption of the most interesting pedagogical approaches such as constructivism and social-constructivism. The adoption of these pedagogical trends improved the quality of learning by providing the personalization of learning and collaborative learning. Through this technological evolution and with the development of the Web 2.0, a new pedagogical approach called Connectivism has emerged [1]. It{\textquoteright}s a promising pedagogical approach that covers learning in heterogeneous communities (humans or not) and is based on the contribution of new technologies. However, Web 2.0 is not efficient to reach distributed knowledge in networks in a smart way and where the ability to learn becomes more and more important [2]. The objective of our works is to conceive and implement CLE {\guillemotleft}Connectivist Learning Environment{\guillemotright} which is a clouded and ubiquitous learning environment. The intended purpose is to adopt connectivism as a pedagogical approach in order to construct knowledge through collaboration between heterogeneous communities composed of humans and intelligent objects. {\textcopyright} 2013 IEEE.}, doi = {10.1109/ITHET.2013.6671027}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84893451279\&doi=10.1109\%2fITHET.2013.6671027\&partnerID=40\&md5=04504dab6e5719c4d0224f11f51122e9}, author = {Belahcen, A. and Abik, M. and Ajhoun, R.} } @conference { ISI:000334934700030, title = {Knowledge Construction In the Connectivist Learning Environment -CLE-}, booktitle = {2013 12TH INTERNATIONAL CONFERENCE ON INFORMATION TECHNOLOGY BASED HIGHER EDUCATION AND TRAINING (ITHET 2013)}, series = {International Conference on Information Technology Based Higher Education and Training}, year = {2013}, note = {12th International Conference on Information Technology Based Higher Education and Training (ITHET), Antalya, TURKEY, OCT 10-12, 2013}, abstract = {Technological advances have brought great changes in all areas, including education. The distance learning evolution (D-learning, E-learning and M-Learning) has prompted the adoption of the most interesting pedagogical approaches such as constructivism and social-constructivism. The adoption of these pedagogical trends improved the quality of learning by providing the personalization of learning and collaborative learning. Through this technological evolution and with the development of the Web 2.0, a new pedagogical approach called Connectivism has emerged {[}1]. It{\textquoteright}s a promising pedagogical approach that covers learning in heterogeneous communities (humans or not) and is based on the contribution of new technologies. However, Web 2.0 is not efficient to reach distributed knowledge in networks in a smart way and where the ability to learn becomes more and more important {[}2]. The objective of our works is to conceive and implement CLE {\textquoteleft}{\textquoteleft}Connectivist Learning Environment{{\textquoteright}{\textquoteright}} which is a clouded and ubiquitous learning environment. 
The intended purpose is to adopt connectivism as a pedagogical approach in order to construct knowledge through collaboration between heterogeneous communities composed of humans and intelligent objects.}, isbn = {978-1-4799-0086-2}, issn = {2380-1603}, author = {Belahcen, Anas and Abik, Mounia and Ajhoun, Rachida} } @article {Anter2013158, title = {K-Schema: A new approach, based on the distribution of user queries, to create views to materialize in a hybrid integration system}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {47}, number = {1}, year = {2013}, note = {cited By 2}, pages = {158-170}, abstract = {The explosion of information technologies and telecommunications has made easy the access and production of information. That is how a very large mass of the latter has generated. This situation has made the integration systems a major need. Among these systems, there is the hybrid mediator. The latter interrogates one part of data on demand as in the virtual approach while charging, filtering and storing the second part, as views, in a local database. The choice of this second part is a critical task. This paper presents a selective approach, which based, essentially, to create these views, on the queries previously posed on the system. Based on the distribution of previous user queries, our approach extract all data most queried by users. The obtained data are classified as candidate views for materialization. Then selecting which one to materialize among all those created in the first step. {\textcopyright} 2005 - 2013 JATIT \& LLS. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84872374571\&partnerID=40\&md5=c84d7e37d38a4f230cdf4c3a43c22954}, author = {Anter, S. and Zellou, A. and Idri, A.} } @article {10115338320131101, title = {L{\textquoteright}{\'e}quipe de recherche IMS.}, journal = {E-Ti: E-Review in Technologies Information}, number = {7}, year = {2013}, pages = {91 - 93}, abstract = {The research works of the IMS team involve the model-driven engineering and the multisystem modeling. This team, which brings together researchers from various specialties, contributes to three strategic axes of the SIME laboratory: The construction of contextual and collaborative systems, the safe and variable development and reuse and complex systems urbanization. Finally, the IMS team works with different teams (MACAO IGSI EAS) through coframes doctoral theses and joint projects. (English) [ABSTRACT FROM AUTHOR]}, keywords = {components, composants, contextual services, IDM, ligne de produits logiciels, MDE, R{\'e}utilisation, services contextuels, software products line, Software reuse, urbanisation, urbanization}, issn = {11148802}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=101153383\&site=ehost-live}, author = {El Asri, Bouchra} } @article {Anter2013816, title = {MATHIS: A new approach for creating views to materialize in a hybrid integration system}, journal = {International Review on Computers and Software}, volume = {8}, number = {3}, year = {2013}, note = {cited By 2}, pages = {816-825}, abstract = {The vulgarization of information and telecommunications technologies has made the integration systems an immediate necessity. Among the latter, there are the hybrid integration systems. These offer a local database where it stores a part of data while integrating the other part virtually. 
This in the objective to increases system performance while ensuring a tradeoff between query response time and data freshness. The approach that has proposed a complete solution is one that creates the candidate views for materialization before selecting among them those that will be materialized. To do this, it selects the attributes most requested by users. Calling the algorithm k-schema, these attributes are organized in schemas, on which we based to build the candidate views for materialization. This approach suffers from a defect. It is because in the phase of the extraction of attributes of interest, we based only on their frequencies of appearance in the user queries. This will select some attributes, even if they do not respond to any selection criterion. Thus, they cause the elimination, in the selection phase, of views to which they were assigned. In this paper, we propose a new approach, which eliminates all attributes that do not respond to the selection criteria prior to the creation of views to materialize. We also propose a new solution to calculate the values of attributes relative to different criteria. {\textcopyright} 2013 Praise Worthy Prize S.r.l. - All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84881501064\&partnerID=40\&md5=5b529bb907d58895a51faec2468f9a6c}, author = {Anter, S. and Zellou, A. and Idri, A.} } @article {Ayadi201372, title = {Memetic algorithm for a multi-objective vehicle routing problem with multiple trips}, journal = {International Journal of Computer Science and Applications}, volume = {10}, number = {2}, year = {2013}, note = {cited By 1}, pages = {72-91}, abstract = {This paper considers a variant of the vehicle routing problem where vehicles are allowed to take more than one route during the working day. A restricted fleet size is used in order to serve customers{\textquoteright} demand. Therefore, vehicles could exceed the time horizon. The objective is to optimize both maximum overtime and routing cost. A mathematical model has been proposed for the VRPM. A memetic algorithm has been developed to solve it by combining a genetic algorithm based on suitable encoding and genetic operators, with a local search procedure. The algorithm is run over a benchmark of the VRPM. Comparing with seven existing algorithms, our approach shows competitive performance and contributes many new best solutions. {\textcopyright} Technomathematics Research Foundation.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84888047908\&partnerID=40\&md5=8b8db1347e09567c32c9a37e1aa3e164}, author = {Ayadi, R. and Benadada, Y.} } @article {9342527920130601, title = {MULTIPLE EQUIVALENT SIMULTANEOUS OFFERS STRATEGY IN AN AGENT-BASED GRID RESOURCE BROKERING SYSTEM - INITIAL CONSIDERATIONS.}, journal = {Scalable Computing: Practice \& Experience}, volume = {14}, number = {2}, year = {2013}, pages = {83 - 94}, abstract = {The Agent in Grid (AiG) project attempts to integrate the concept of the Grid and an agent-based system to facilitate efficient resource management in the Grid. In this paper, we present preliminary considerations concerning multiple equivalent simultaneous offers strategy that can be used in the Service Level Agreement (SLA) negotiations. These negotiations are the key part of the main use case scenarios within the AiG project. In this context, first, we describe the AiG system. 
Second, we introduce the simultaneous offer strategy, as a mechanism known from economy, and suggest an approach for using it in the AiG negotiations. [ABSTRACT FROM AUTHOR]}, keywords = {agent-based negotiations, AiG, Computational grids (Computer systems), Economics {\textendash} Study \& teaching, Gateways (Computer networks), Grid computing, multi-attribute negotiations, multiple equivalent simultaneous offers, Service level agreements, SLA, Social psychology {\textendash} Study \& teaching}, issn = {18951767}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=93425279\&site=ehost-live}, author = {Attaoui, Naoual and Ganzha, Maria and Paprzycki, Marcin and Wasielewska, Katarzyna and Essaaidi, Mohammad} } @article {Attaoui201383, title = {Multiple equivalent simultaneous offers strategy in an agent-based grid resource brokering system-initial considerations}, journal = {Scalable Computing}, volume = {14}, number = {2}, year = {2013}, note = {cited By 0}, pages = {83-94}, abstract = {The Agent in Grid (AiG) project attempts to integrate the concept of the Grid and an agent-based system to facilitate efficient resource management in the Grid. In this paper, we present preliminary considerations concerning multiple equivalent simultaneous offers strategy that can be used in the Service Level Agreement (SLA) negotiations. These negotiations are the key part of the main use case scenarios within the AiG project. In this context, first, we describe the AiG system. Second, we introduce the simultaneous offer strategy, as a mechanism known from economy, and suggest an approach for using it in the AiG negotiations. {\textcopyright} 2013 SCPE.}, doi = {10.12694/scpe.v14i2.842}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84884175155\&doi=10.12694\%2fscpe.v14i2.842\&partnerID=40\&md5=de46ef42a2dff1ff1937b31977015b55}, author = {Attaoui, N.a and Ganzha, M.b and Paprzycki, M.b and Wasielewska, K.b and Essaaidi, M.c} } @conference {Bouirouga2013422, title = {Neural network adult videos recognition using jointly face shape and skin feature extraction}, booktitle = {VISAPP 2013 - Proceedings of the International Conference on Computer Vision Theory and Applications}, volume = {1}, year = {2013}, note = {cited By 0}, pages = {422-425}, abstract = {This paper presents a novel approach for video adult detection using face shape, skin threshold technique and neural network. The goal of employing skin-color information is to select the appropriate color model that allows verifying pixels under different lighting conditions and other variations. Then, the output videos are classified by neural network. The simulation shows that this system achieved 95.4\% of the true rate.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84878245129\&partnerID=40\&md5=efe58b6bdb1387e7884a28b2a0f6f0bb}, author = {Bouirouga, H.a and Elfkihi, S.b and Jilbab, A.c and Aboutajdine, D.a} } @article {Jorio2013717, title = {A new clustering algorithm for wireless sensor networks}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {49}, number = {3}, year = {2013}, note = {cited By 1}, pages = {717-724}, abstract = {Wireless sensor networks have recently become an attractive research area. However, saving energy and, thus, extending the wireless sensor network lifetime entails great challenges. For this reason, clustering techniques are largely made use of. In this paper we propose a new algorithm based on the principle of spectral clustering methods.
Especially, we use the K-ways spectral clustering algorithm. The main characteristic of our proposal is that it defines the optimal number of clusters and dynamically changes the election probabilities of the cluster heads based on their residual energy. Upon analyzing the impact of node density on the robustness of the proposed algorithm as well as on its energy and lifetime gains, simulation results show that the approach actually improves the lifetime of a whole network and presents more energy efficiency distribution compared to Low-Energy Adaptive Clustering Hierarchy, Centralized Low-Energy Adaptive Clustering Hierarchy, and Distance-Energy Cluster Structure approaches. {\textcopyright} 2005 - 2013 JATIT \& LLS. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84875692769\&partnerID=40\&md5=ea5125d9aa1b9b5f0b4c870cfad69e2e}, author = {Jorio, A.a and Fkihi, S.E.b and Elbhiri, B.c and Aboutajdine, D.a} } @article {Benhaddi20132013, title = {A new enterprise integration-based framework for enterprise physical mashup}, journal = {International Review on Computers and Software}, volume = {8}, number = {8}, year = {2013}, note = {cited By 0}, pages = {2013-2024}, abstract = {In the Web of Things, devices of daily life are empowered through a web-enabling process to become integrable with computer network. These devices - called smart objects - are becoming very useful both in enterprises and in simple users daily life; in fact, they provide easy access to useful services and can collaborate with each other to build a collective intelligence capable of performing routine but very important tasks. In some critical situations, the smart objects collaborations need to be built by end users themselves in order to respond quickly to any new situational need. These collaborations can be simple or can consist of sophisticated and advanced use cases, which we call in this paper: the Enterprise Physical Mashups (EPMs). Existing work does not provide solutions for end user development of advanced use cases while addressing the requirements of a physical world. In this paper, we try to formalize the services composition aspect in the Enterprise Physical Mashup development by proposing a new rich integration language based on the advanced Enterprise Integration Patterns (EIPS). We also introduce new key concepts for an intuitive and self-explanatory methodology for end users physical services integration. Through these contributions, we give the promise of achieving efficient enterprise-class physical services integration. {\textcopyright} 2013 Praise Worthy Prize S.r.l. - All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84885905057\&partnerID=40\&md5=2db6a869086c9e8d445c40ca78ae6a31}, author = {Benhaddi, M.a and Ba{\"\i}na, K.b and Abdelwahed, E.a} } @conference {Elbhiri2013, title = {A new spectral classification for robust clustering in wireless sensor networks}, booktitle = {Proceedings of 2013 6th Joint IFIP Wireless and Mobile Networking Conference, WMNC 2013}, year = {2013}, note = {cited By 1}, abstract = {Wireless sensor network has recently become an area of attractive research interest. It consists of low-cost, low power, and energy-constrained sensors responsible for monitoring a physical phenomenon and reporting to sink node where the end-user can access the data. Saving energy and therefore extending the wireless sensor network lifetime, involves great challenges. For these purposes, clustering techniques are largely used.
Using many empirical successes of spectral clustering methods, we propose a new algorithm that we called Spectral Classification for Robust Clustering in Wireless Sensor Networks (SCRC-WSN). This protocol is a spectral partitioning method using graph theory techniques with the aim to separate the network into a fixed optimal number of clusters. The cluster{\textquoteright}s nodes communicate with an elected node called cluster head, and then the cluster heads communicate the information to the base station. Defining the optimal number of clusters and changing dynamically the cluster head election probability are the SCRC-WSN strongest characteristics. In addition, our proposed protocol is a centralized one which takes into account the node{\textquoteright}s residual energy to define the cluster heads. We studied the impact of node density on the robustness of the SCRC-WSN algorithm as well as its energy and its lifetime gains. Simulation results show that the proposed algorithm increases the lifetime of a whole network and presents more energy efficiency distribution compared to the Low-Energy Adaptive Clustering Hierarchy (LEACH) approach and the Centralized LEACH (LEACH-C) one. {\textcopyright} 2013 IEEE.}, doi = {10.1109/WMNC.2013.6548982}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84881393741\&doi=10.1109\%2fWMNC.2013.6548982\&partnerID=40\&md5=648ff469917070df5c7f54337dfa592b}, author = {Elbhiri, B.a b and Fkihi, S.E.b c and Saadane, R.b d and Lasaad, N.b and Jorio, A.b and Aboutajdine, D.b} } @conference {Ghenname2013, title = {Personalized recommendation based hashtags on e-learning systems}, booktitle = {2013 3rd International Symposium ISKO-Maghreb}, year = {2013}, note = {cited By 0}, abstract = {The data generated by users on various social structures are growing exponentially over time. They become increasingly prodigious, unmanageable and difficult to use. Therefore to easily find the content they produce among this mass of data, users label their own content using neologisms appointed hashtags. This practice attracts more and more the interest of researchers, because beyond the acquisition of knowledge, the Semantic Web approaches are also producing relevant information that may be used in practical situations. In this direction, we thought to exploit the activities of social Web users, mainly Hashtags. Hence, we focused on the identification of hashtags (as well as their different definitions) for personalized recommendation on e-learning systems. This paper aims at giving an insight on the pioneers{\textquoteright} works and the opportunities raised by mixing the Social and the Semantic Web for education on one hand. And give the general architecture of our proposition and results obtained on the other hand.
{\textcopyright} 2013 IEEE.}, doi = {10.1109/ISKO-Maghreb.2013.6728109}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84894115554\&doi=10.1109\%2fISKO-Maghreb.2013.6728109\&partnerID=40\&md5=d8892d965f690375fac4c7923f557c92}, author = {Ghenname, M.a b and Abik, M.a and Ajhoun, R.a and Subercaze, J.b and Gravier, C.b and Laforest, F.b} } @conference { ISI:000350287800004, title = {Personalized Recommendation Based Hashtags on E-learning Systems}, booktitle = {2013 3RD INTERNATIONAL SYMPOSIUM ISKO-MAGHREB}, year = {2013}, note = {ISKO-Maghreb 3rd International Symposium, Marrakech, MOROCCO, NOV 08-09, 2013}, publisher = {ISKO}, organization = {ISKO}, abstract = {The data generated by users on various social structures are growing exponentially over time. They become increasingly prodigious, unmanageable and difficult to use. Therefore, to easily find the content they produce among this mass of data, users label their own content using neologisms called hashtags. This practice attracts more and more interest from researchers because, beyond the acquisition of knowledge, Semantic Web approaches also produce relevant information that may be used in practical situations. In this direction, we set out to exploit the activities of social Web users, mainly hashtags. Hence, we focused on the identification of hashtags (as well as their different definitions) for personalized recommendation on e-learning systems. This paper aims, on the one hand, at giving an insight into the pioneers{\textquoteright} works and the opportunities raised by mixing the Social and the Semantic Web for education and, on the other hand, at presenting the general architecture of our proposition and the results obtained.}, isbn = {978-1-4799-3392-1}, author = {Ghenname, Merieme and Abik, Mounia and Ajhoun, Rachida and Subercaze, Julien and Gravier, Christophe and Laforest, Frederique} } @conference {Ouacha2013, title = {Proactive routing based distributed energy consumption}, booktitle = {2013 8th International Conference on Intelligent Systems: Theories and Applications, SITA 2013}, year = {2013}, note = {cited By 0}, abstract = {Energy consumption in a mobile network might be higher for some nodes acting as relays in an OLSR network than for others. This might have considerable consequences on the network lifetime. Our approach is presented in the EDCR protocol. It is an extended version of OLSR which aims to increase the residual energy of the network by distributing forwarding tasks between MPRs. For that, we change the MPR selection procedure in order to favor nodes with the largest number of MPR-selectors. Our hypothesis is evaluated under NS2 and effectively results in less energy consumption. {\textcopyright} 2013 IEEE.}, doi = {10.1109/SITA.2013.6560785}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883057326\&doi=10.1109\%2fSITA.2013.6560785\&partnerID=40\&md5=2831baa796bde04305344dd6b1643db1}, author = {Ouacha, A.a and El Abbadi, J.a and Habbani, A.b and Bouamoud, B.b} } @conference {ElHamlaoui2013, title = {A process for maintaining heterogeneous models consistency through change synchronization}, booktitle = {Proceedings of IEEE/ACS International Conference on Computer Systems and Applications, AICCSA}, year = {2013}, note = {cited By 0}, abstract = {This paper falls into the context of modeling complex systems according to various viewpoints.
More precisely, it presents an iterative process of heterogeneous models consistency management - by taking into account various types of evolution - based on building a correspondence model. In the case of model evolution, this process is intended to capture changes in the models, to list the modifications to be made in the impacted models and finally to update the correspondence model for a future iteration. {\textcopyright} 2013 IEEE.}, doi = {10.1109/AICCSA.2013.6616433}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84887221398\&doi=10.1109\%2fAICCSA.2013.6616433\&partnerID=40\&md5=6948de7ad7e858c47c2ada2eea43e134}, author = {El Hamlaoui, M.a b and Ebersold, S.a and Anwar, A.c and Nassar, M.b and Coulette, B.a} } @conference { ISI:000352386800106, title = {(Q, s) models for inventory policy with random component procurement lead times}, booktitle = {PROCEEDINGS OF 2013 INTERNATIONAL CONFERENCE ON INDUSTRIAL ENGINEERING AND SYSTEMS MANAGEMENT (IEEE-IESM 2013)}, year = {2013}, note = {5th International Conference on Industrial Engineering and Systems Management (IEEE IESM), Mohammadia Sch Engn, Rabat, MOROCCO, OCT 28-30, 2013}, pages = {718-726}, publisher = {IEEE; IEEE Morocco Sect; I4e2; Univ Valenciennes Hannaut Cambresis; Univ Mohammed V Agdal Rabat; CNRST; CISIT; ENIM; ESITH; GDR MACS; LAMIH CNRS; IFSTTAR; IRT RAILENIUM; TEMPO}, organization = {IEEE; IEEE Morocco Sect; I4e2; Univ Valenciennes Hannaut Cambresis; Univ Mohammed V Agdal Rabat; CNRST; CISIT; ENIM; ESITH; GDR MACS; LAMIH CNRS; IFSTTAR; IRT RAILENIUM; TEMPO}, abstract = {This paper considers inventory models of (Q, s) type with Q the order-quantity and s the order point. In general, an optimal choice of control parameters (Q and s) will depend on the characteristics of replenishment lead time and the demand process, as well as holding and shortage costs. Although many studies have treated lead time as constant, focusing only on demand variability, a number of authors have shown that a stochastic lead time is an issue that can have significant impact on inventory models and systems. This paper addresses the model when supply is assumed to have an exponential lead time distribution, and the {\textquoteleft}{\textquoteleft}customer{{\textquoteright}{\textquoteright}} itself is an exponential unreliable manufacturing plant, aiming for a constant mean production rate; thus both supply and demand have random characteristics. Under a so-called averaging principle approximation, we derive closed form expressions of the quantities of interest and illustrate their application in the optimization of the Q and s parameters.}, isbn = {978-2-9600532-4-1}, author = {Mhada, Fatima and Malhame, Roland and Pellerin, Robert and Sadr, Javad and Gharbi, Ali}, editor = {Aboutajdine, D and Skalli, A and Benchekroun, B and Artiba, A} } @conference {Faqihi201356, title = {Semantic Interoperability in the d-learning in the era of cloud computing: Simplicity or complexity}, booktitle = {2013 International Conference on Interactive Collaborative Learning, ICL 2013}, year = {2013}, note = {cited By 3}, pages = {56-60}, abstract = {From the traditional computing of the 60s to the innovations of the 80s, passing through the emergence of the Internet and advances in virtualization, the concept of cloud computing has a promising future. Certainly, much remains to be done, especially for interoperability, but also for the establishment of norms and standards that will allow a set of heterogeneous systems to be built.
{\textcopyright} 2013 IEEE.}, doi = {10.1109/ICL.2013.6644536}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84898656730\&doi=10.1109\%2fICL.2013.6644536\&partnerID=40\&md5=761c2a496eee37edd0f6c37413f7ff35}, author = {Faqihi, B.a and Daoudi, N.b and Ajhoun, R.a} } @conference { ISI:000326538300127, title = {Sharing demand forecasts in a basic supply chain using game theory}, booktitle = {2013 5TH INTERNATIONAL CONFERENCE ON MODELING, SIMULATION AND APPLIED OPTIMIZATION (ICMSAO)}, year = {2013}, note = {5th International Conference on Modeling, Simulation and Applied Optimization (ICMSAO), Hammamet, TUNISIA, APR 28-30, 2013}, abstract = {A supply chain can be defined as a set of distributed entities composed of manufacturers, suppliers, distributors, retailers and customers. Supply chain management (SCM) is the supervision of its three flows: the finances flow, the materials flow and the information flow. Indeed, this paper focuses on the information flow, more precisely on sharing information within a one-echelon supply chain. It examines a game theory approach applied to a basic supply chain composed of three entities called players: a retailer, a distributor and a supplier with probabilistic demand. Each player is rational: he tries to maximize his profit, and consequently tries to find a local optimum; this situation leads to a loss of performance of the whole chain. The goal of our work is to optimize both inventory and transportation costs by using the concept of Game Theory.}, isbn = {978-1-4673-5814-9; 978-1-4673-5812-5}, author = {Slimani, Ilham and Achchab, Said} } @conference {Benhaddi2013, title = {SOA for the masses: End users as services composers}, booktitle = {Proceedings of IEEE/ACS International Conference on Computer Systems and Applications, AICCSA}, year = {2013}, note = {cited By 0}, abstract = {The end user service development known as the user-centric SOA emerged as a new approach that allows giving the end user the ability to create on the fly his own applications that meet a situational need. In fact, the classical SOA was designed for developers and is characterized by a heavy technical stack which is out of reach of end users. Lightweight Web 2.0 technologies such as Mashup appeared to bridge this gap and provide a new agile and quick way to compose and integrate different resources in a dynamic and on the fly manner. However, Mashups are emerging applications, and thus constitute an immature, non-intuitive and non-formalized area. In this paper, we formalize the user-centric SOA development by introducing a new rich integration language based on the advanced Enterprise Integration Patterns (EIPs). We also propose a new intuitive and self-explanatory semantic methodology and interaction model for end users{\textquoteright} services integration. {\textcopyright} 2013 IEEE.}, doi = {10.1109/AICCSA.2013.6616485}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84887215517\&doi=10.1109\%2fAICCSA.2013.6616485\&partnerID=40\&md5=ab05e567efec19575ace07de6e812cdb}, author = {Benhaddi, M.a and Ba{\"\i}na, K.b and Abdelwahed, E.H.a} } @article {Belaoud2013357, title = {Survey of SIP authentication mechanisms}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {58}, number = {2}, year = {2013}, note = {cited By 0}, pages = {357-365}, abstract = {In recent years, Voice over Internet Protocol (VoIP) has become a popular NGN (Next Generation Network) technology.
As this technology is built on the Internet Protocol, it is affected by critical security problems. The Session Initiation Protocol (SIP) is considered the most widely used signaling protocol for establishing, maintaining and terminating VoIP calls over the Internet. The security of SIP is becoming more and more important. The prime security service required by SIP is authentication. This paper focuses on the SIP security mechanisms of authentication. We survey the newly proposed methods of authentication and then proceed to evaluate these methods in terms of security efficiency and computational cost. {\textcopyright} 2005 - 2013 JATIT \& LLS. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84890889665\&partnerID=40\&md5=350ed13308e43f7e4915fd0fd863b414}, author = {Belaoud, H.a and El Abbadi, J.a and Habbani, A.b} } @conference { ISI:000326649800031, title = {Towards a Generic Composition of Specific Domain Models}, booktitle = {2013 8TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA)}, year = {2013}, note = {8th International Conference on Intelligent Systems - Theories and Applications (SITA), Ecole Mohammadia Ingenieurs, Rabat, MOROCCO, MAY 08-09, 2013}, publisher = {IEEE; IEEE Morocco Sect; Univ Mohammed V Agdal}, organization = {IEEE; IEEE Morocco Sect; Univ Mohammed V Agdal}, abstract = {Systems are becoming complex and are no longer restricted to one specific domain. So, it is impracticable to describe the whole system with one model. This leads to the problem of composing the domain models describing the system. In this context, this article proposes a new approach for composing specific domain models. First, we analyze some related works. On the basis of the key findings and conclusions drawn from the analysis, we propose a multidimensional approach based on the composition of crosscutting concerns contained in the source domain models.}, isbn = {978-1-4799-0299-6}, author = {Baya, Asmaa and El Asri, Bouchra and Nassar, Mahmoud} } @conference {Baya2013, title = {Towards a generic composition of specific domain models}, booktitle = {2013 8th International Conference on Intelligent Systems: Theories and Applications, SITA 2013}, year = {2013}, note = {cited By 0}, abstract = {Systems are becoming complex and are no longer restricted to one specific domain. So, it is impracticable to describe the whole system with one model. This leads to the problem of composing the domain models describing the system. In this context, this article proposes a new approach for composing specific domain models. First, we analyze some related works. On the basis of the key findings and conclusions drawn from the analysis, we propose a multidimensional approach based on the composition of crosscutting concerns contained in the source domain models. {\textcopyright} 2013 IEEE.}, doi = {10.1109/SITA.2013.6560814}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883068655\&doi=10.1109\%2fSITA.2013.6560814\&partnerID=40\&md5=305636eab7c0f637087ff1c25c8b1cd7}, author = {Baya, A. and El Asri, B.
and Nassar, M.} } @conference { ISI:000341079600033, title = {A Traceability Approach for Model Composition}, booktitle = {2013 ACS INTERNATIONAL CONFERENCE ON COMPUTER SYSTEMS AND APPLICATIONS (AICCSA)}, year = {2013}, note = {IEEE and Arab-Computing-Society (ACS) International Conference on Computer Systems and Applications (AICCSA), Al Akhawayn Univ, Ifrane, MOROCCO, MAY 27-30, 2013}, publisher = {IEEE; Arab Comp Soc}, organization = {IEEE; Arab Comp Soc}, abstract = {Model composition is one of the important activities in model driven engineering, because complex systems are built based on several viewpoints. The composition operation is a tedious task, however, traceability not only provides us with the tool to comprehend its effects, but also helps us to better manage the operation itself. This paper suggests a traceability framework for the model composition operation based on aspect oriented modeling and graph transformation. The generated trace models conform to a generic metamodel that allows representing nested trace links. A merge specification case study is presented to illustrate our contribution.}, isbn = {978-1-4799-0792-2}, author = {Laghouaouta, Youness and Anwar, Adil and Nassar, Mahmoud} } @conference {Laghouaouta2013, title = {A traceability approach for model composition}, booktitle = {Proceedings of IEEE/ACS International Conference on Computer Systems and Applications, AICCSA}, year = {2013}, note = {cited By 0}, abstract = {Model composition is one of the important activities in model driven engineering, because complex systems are built based on several viewpoints. The composition operation is a tedious task, however, traceability not only provides us with the tool to comprehend its effects, but also helps us to better manage the operation itself. This paper suggests a traceability framework for the model composition operation based on aspect oriented modeling and graph transformation. The generated trace models conform to a generic metamodel that allows representing nested trace links. A merge specification case study is presented to illustrate our contribution. {\textcopyright} 2013 IEEE.}, doi = {10.1109/AICCSA.2013.6616448}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84887237893\&doi=10.1109\%2fAICCSA.2013.6616448\&partnerID=40\&md5=9494f946bc218d42398e67c721612d5f}, author = {Laghouaouta, Y.a and Anwar, A.b and Nassar, M.a} } @conference { ISI:000358736600001, title = {On the Use of Software Quality Standard ISO/IEC9126 in Mobile Environments}, booktitle = {2013 20TH ASIA-PACIFIC SOFTWARE ENGINEERING CONFERENCE (APSEC 2013), VOL 1}, series = {Asia-Pacific Software Engineering Conference}, year = {2013}, note = {20th Asia-Pacific Software Engineering Conference (APSEC), Chulalongkorn Univ, Fac Engn, Dept Comp Engn, Bangkok, THAILAND, DEC 02-05, 2013}, pages = {1-8}, publisher = {Prov Elect Author; Metropolitan Elect Author; Thailand Convent \& Exhibit Bur; IEEE Comp Soc}, organization = {Prov Elect Author; Metropolitan Elect Author; Thailand Convent \& Exhibit Bur; IEEE Comp Soc}, abstract = {The capabilities and resources offered by mobile technologies are still far from those provided by fixed environments, and this poses serious challenges, in terms of evaluating the quality of applications operating in mobile environments. This article presents a study to help quality managers apply the ISO 9126 standard on software quality, particularly the External Quality model, to mobile environments. 
The influence of the limitations of mobile technologies is evaluated for each software quality characteristic, based on the coverage rates of its external metrics, which are themselves influenced by these limitations. The degrees of this influence are discussed and aggregated to provide useful recommendations to quality managers for their evaluation of quality characteristics in mobile environments. These recommendations are intended for mobile software in general and are not targeted at specific ones. The External Quality model is especially valuable for assessing the Reliability, Usability, and Efficiency characteristics, and illustrates very well the conclusive nature of the recommendations of this study. However, more study is needed on the other quality characteristics, in order to determine the relevance of evaluating them in mobile environments.}, isbn = {978-1-4799-2143-0}, issn = {1530-1362}, doi = {10.1109/APSEC.2013.12}, author = {Idri, Ali and Moumane, Karima and Abran, Alain}, editor = {Muenchaisri, P and Rothermel, G} } @conference {Idri20131, title = {On the use of software quality standard ISO/IEC9126 in mobile environments}, booktitle = {Proceedings - Asia-Pacific Software Engineering Conference, APSEC}, volume = {1}, year = {2013}, note = {cited By 5}, pages = {1-8}, abstract = {The capabilities and resources offered by mobile technologies are still far from those provided by fixed environments, and this poses serious challenges, in terms of evaluating the quality of applications operating in mobile environments. This article presents a study to help quality managers apply the ISO 9126 standard on software quality, particularly the External Quality model, to mobile environments. The influence of the limitations of mobile technologies is evaluated for each software quality characteristic, based on the coverage rates of its external metrics, which are themselves influenced by these limitations. The degrees of this influence are discussed and aggregated to provide useful recommendations to quality managers for their evaluation of quality characteristics in mobile environments. These recommendations are intended for mobile software in general and are not targeted at specific ones. The External Quality model is especially valuable for assessing the Reliability, Usability, and Efficiency characteristics, and illustrates very well the conclusive nature of the recommendations of this study. However, more study is needed on the other quality characteristics, in order to determine the relevance of evaluating them in mobile environments. {\textcopyright} 2013 IEEE.}, doi = {10.1109/APSEC.2013.12}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84936873378\&doi=10.1109\%2fAPSEC.2013.12\&partnerID=40\&md5=c03a500f9cc8333928ee18beee27b06c}, author = {Idri, A.a and Moumane, K.a and Abran, A.b} } @article {Benhaddi2013523, title = {The user-centric SOA and its impacts on other disciplines}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {54}, number = {3}, year = {2013}, note = {cited By 1}, pages = {523-531}, abstract = {The end user service development known as the user-centric SOA emerged as a new approach that allows giving the end user the ability to create on the fly his own applications that meet a situational need. In fact, the classical SOA was designed for developers and is characterized by a heavy technical stack which is out of reach of end users.
The user-centric SOA approach brings great added value taking advantage from the creativity and the participation of end users; furthermore, it is becoming a key factor of competitiveness within enterprises and is influencing other disciplines. The Cloud Computing and the Enterprise Architecture are two fields that captured growing attention in recent years and that can gain maturity and efficiency from adopting and embracing the user-centric SOA approach. In this paper, we present the user-centric SOA approach and we give an overview of our solution for an efficient user-centric services creation. Then we study the impact of the user-centric SOA on the Cloud Computing and Enterprise Architecture fields. {\textcopyright} 2005 - 2013 JATIT \& LLS. All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883372686\&partnerID=40\&md5=f6c288a49e40715a31fbd5b2f489de7e}, author = {Benhaddi, M.a and Ba{\"\i}na, K.b and Abdelwahed, E.H.a} } @article {Choukri20122121, title = {AODV protocol behavior based on H.264 multiservice traffic}, journal = {International Review on Computers and Software}, volume = {7}, number = {5}, year = {2012}, note = {cited By 2}, pages = {2121-2125}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84873308095\&partnerID=40\&md5=a7d603a75c0b90d10b0e6ac5e06924fe}, author = {Choukri, A. and Amnai, M. and Habbani, A. and Elkoutbi, M.} } @conference { ISI:000325229700053, title = {Design of distributed IMS by classification and evaluation of costs for secured architecture}, booktitle = {2012 SECOND INTERNATIONAL CONFERENCE ON INNOVATIVE COMPUTING TECHNOLOGY (INTECH)}, year = {2012}, note = {2nd International Conference on Innovative Computing Technology (INTECH), Casablanca, MOROCCO, SEP 18-20, 2012}, pages = {291-296}, publisher = {IEEE UK \& RI Consumer Elect Chapter; IEEE Broadcast Technol Chapter; IEEE}, organization = {IEEE UK \& RI Consumer Elect Chapter; IEEE Broadcast Technol Chapter; IEEE}, abstract = {The core of Next Generation Network(NGN) IP Multimedia subsystem(IMS) based on SIP as mechanism signaling, is an important challenge for supporting data communication services, voice, video, messaging and web-based technologies. In this work we present a novel design of architecture and turns up some challenges of new IMS architecture and security system. This architecture provides a robustness, reliability, scalability and strategy for extension in the future and responds to the security challenges. We introduced the architecture with clustering database HSS and automatic storage of data that give a secure database. This paper give a classification of security in IMS network, modulate the risk in IMS network and our comparison is giving by cost signaling interworking with and without securing Gateway (SEG). We show that there is a tradeoff between the level of increasing system security and the potential cost incurred. 
we conclude that this architecture is suitable for operators and services providers for the new business models.}, isbn = {978-1-4673-2679-7}, author = {Allouch, Hamid and Belkasmi, Mostafa} } @conference {Allouch2012291, title = {Design of distributed IMS by classification and evaluation of costs for secured architecture}, booktitle = {2nd International Conference on Innovative Computing Technology, INTECH 2012}, year = {2012}, note = {cited By 0}, pages = {291-296}, doi = {10.1109/INTECH.2012.6457814}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874489555\&doi=10.1109\%2fINTECH.2012.6457814\&partnerID=40\&md5=af127257372de35e045a5f6946b76a7f}, author = {Allouch, H. and Belkasmi, M.} } @article {8699110620120101, title = {Efficient Dual Domain Decoding of Linear Block Codes Using Genetic Algorithms.}, journal = {Journal of Electrical \& Computer Engineering}, year = {2012}, pages = {1 - 12}, abstract = {A computationally efficient algorithm for decoding block codes is developed using a genetic algorithm (GA). The proposed algorithm uses the dual code in contrast to the existing genetic decoders in the literature that use the code itself. Hence, this new approach reduces the complexity of decoding the codes of high rates. We simulated our algorithm in various transmission channels. The performance of this algorithm is investigated and compared with competitor decoding algorithms including Maini and Shakeel ones. The results show that the proposed algorithm gives large gains over the Chase-2 decoding algorithm and reach the performance of the OSD-3 for some quadratic residue (QR) codes. Further, we define a new crossover operator that exploits the domain specific information and compare it with uniform and two point crossover. The complexity of this algorithm is also discussed and compared to other algorithms [ABSTRACT FROM AUTHOR]}, keywords = {Coding theory, Computational complexity, Data transmission systems, Decoders (Electronics) {\textendash} Software, Genetic algorithms, Performance evaluation}, issn = {20900147}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=86991106\&site=ehost-live}, author = {Azouaoui, Ahmed and Belkasmi, Mostafa and Farchane, Abderrazak} } @article {Azouaoui2012, title = {Efficient dual domain decoding of linear block codes using genetic algorithms}, journal = {Journal of Electrical and Computer Engineering}, year = {2012}, note = {cited By 1}, doi = {10.1155/2012/503834}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84859819545\&doi=10.1155\%2f2012\%2f503834\&partnerID=40\&md5=6e6f96bbb49e27882bf30fa87aebeee4}, author = {Azouaoui, A. and Belkasmi, M. and Farchane, A.} } @conference {Askali2012318, title = {An efficient method to find the minimum distance of linear block codes}, booktitle = {Proceedings of 2012 International Conference on Multimedia Computing and Systems, ICMCS 2012}, year = {2012}, note = {cited By 1}, pages = {318-324}, doi = {10.1109/ICMCS.2012.6320261}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84869794196\&doi=10.1109\%2fICMCS.2012.6320261\&partnerID=40\&md5=39536ce39eb94470202c180890ae28d0}, author = {Askali, M. and Nouh, S. 
and Belkasmi, M.} } @conference { ISI:000310353000141, title = {An Efficient method to find the Minimum Distance of Linear Block Codes}, booktitle = {2012 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2012}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Tangiers, MOROCCO, MAY 10-12, 2012}, pages = {773-779}, publisher = {Morocco Sect MTT/AP Joint Chapter}, organization = {Morocco Sect MTT/AP Joint Chapter}, abstract = {Finding the minimum distance of linear codes is in general an NP-hard problem; we propose an efficient algorithm to attack this problem. The principle of this approach is to search for code words locally around the all-zero code word perturbed by a level of noise magnitude, in other words the maximum noise that can be corrected by a Soft-In decoder, anticipating that the resultant nearest non-zero code words will most likely contain the minimum Hamming weight code word, whose Hamming weight is equal to the minimum distance of the linear code. Numerous results show that the proposed algorithm is valid for general linear codes and is very fast compared to all other known techniques; therefore it is a good computational tool. Compared to Joanna{\textquoteright}s works, we show that our algorithm has a low complexity and a fast execution time. For some linear RQ, QDC and BCH codes with unknown minimum distance, we give a good (true) estimation of the minimum distance where the length is less than 439.}, isbn = {978-1-4673-1520-3}, author = {Askali, Mohamed and Nouh, Said and Belkasmi, Mostafa}, editor = {Essaaidi, M and Zaz, Y} } @conference {Alrajeh2012, title = {Formal specification of humanitarian disaster management processes}, booktitle = {2012 6th International Symposium on Medical Information and Communication Technology, ISMICT 2012}, year = {2012}, note = {cited By 0}, doi = {10.1109/ISMICT.2012.6203056}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84862859354\&doi=10.1109\%2fISMICT.2012.6203056\&partnerID=40\&md5=ac007a8b1fa1e3b7f2a0d7a0e6da193c}, author = {Alrajeh, N.A. and Bounabat, B.} } @article {EJ97696120120101, title = {Impact of Technological Advancement on Pedagogy.}, journal = {Turkish Online Journal of Distance Education}, volume = {13}, number = {1}, year = {2012}, pages = {224 - 237}, abstract = {To improve the quality of learning, pedagogues have prescribed different pedagogical approaches (constructivist, cognitivist...). However, the effective implementation of the majority of these approaches has only been possible after the advent of new forms of learning (E_learning, M-learning...). These forms are closely related to technological development. Later, with the emergence of technology (pervasive computing, Artificial Intelligence...) a new form of learning was established. It is called Pervasive Learning "P-Learning". P-Learning is a social process that connects learners to communities of devices, people, and situations in a transparent and independent manner. This learning form goes far beyond the predictions suggested by pedagogues. Learning can then take place outside the learner via technology, which will be an extension of his brain by unloading the cognitive practices he performs.
The aim of this paper is to answer the following questions: What alliance is there betwe}, keywords = {Change Strategies, Computer assisted instruction, Constructivism (Learning), Distance Education, Educational Change, Educational Technology, Electronic Learning, Influence of Technology, Instructional Effectiveness, Performance Factors, Synchronous Communication, Teaching Methods, Technological Advancement, Technology Uses in Education, Virtual Classrooms}, issn = {1302-6488}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=eric\&AN=EJ976961\&site=ehost-live}, author = {Abik, Mounia and Ajhoun, Rachida and Ensias, Lerma} } @article {Abik2012224, title = {Impact of technological advancement on pedagogy}, journal = {Turkish Online Journal of Distance Education}, volume = {13}, number = {1}, year = {2012}, note = {cited By 3}, pages = {224-237}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84861667801\&partnerID=40\&md5=9e34db346a013df6dc45bddf6665ffec}, author = {Abik, M. and Ajhoun, R.} } @article {Lakki2012217, title = {The integration of the speed of mobility in the selection of MPR to improve the QoS in ad hoc networks}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {36}, number = {2}, year = {2012}, note = {cited By 1}, pages = {217-226}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84857970125\&partnerID=40\&md5=23ad0cf075d2275f629e3d075e1d94f5}, author = {Lakki, N. and Ouacha, A. and Habbani, A. and El Abbadi, J.} } @article { ISI:000311773200010, title = {Interoperability optimization in healthcare collaboration networks}, journal = {BIOMEDICAL ENGINEERING-BIOMEDIZINISCHE TECHNIK}, volume = {57}, number = {5}, year = {2012}, pages = {403-411}, abstract = {Interoperability is one of the most challenging concerns that face healthcare information system (HIS) actors. Interoperability implementation in this context may be a data exchange interfacing, a service oriented interaction or even a composition of new composite healthcare processes. In fact, optimizing efforts of interoperability achievement is a key requirement to effectively setup, develop and evolve intra- and inter-organizational collaboration. To ensure interoperability project effectiveness, this paper proposes a modeling representation of health processes interoperability evolution. Interoperability degrees of involved automated processes are assessed using a ratio metric, taking into account all significant aspects, such as potentiality, compatibility and operational performance. Then, a particle swarm optimization algorithm (PSO) is used as a heuristic optimization method to find the best distribution of effort needed to establish an efficient healthcare collaborative network.}, issn = {0013-5585}, doi = {10.1515/bmt-2011-0118}, author = {Alrajeh, Nabil A. and Elmir, Badr and Bounabat, Bouchaib and El Hami, Norelislam} } @article {Alrajeh2012403, title = {Interoperability optimization in healthcare collaboration networks}, journal = {Biomedizinische Technik}, volume = {57}, number = {5}, year = {2012}, note = {cited By 0}, pages = {403-411}, doi = {10.1515/bmt-2011-0118}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84867797852\&doi=10.1515\%2fbmt-2011-0118\&partnerID=40\&md5=f6e8a6471b8d3fa0b5c770ac7ea27b80}, author = {Alrajeh, N.A. and Elmir, B. and Bounabat, B.
and Hami, N.E.} } @article {Belaoud201285, title = {Mobile networks migration towards NGN}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {36}, number = {1}, year = {2012}, note = {cited By 0}, pages = {85-91}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84857179051\&partnerID=40\&md5=e4d7df56d5dbc346177de55798050977}, author = {Belaoud, H. and Habbani, A. and El Abbadi, J.} } @conference {ElAichi2012492, title = {Moroccan E-government ten years evolutions}, booktitle = {ACM International Conference Proceeding Series}, year = {2012}, note = {cited By 0}, pages = {492-493}, doi = {10.1145/2463728.2463832}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84877304077\&doi=10.1145\%2f2463728.2463832\&partnerID=40\&md5=9561ba56c8ab49e79eb5fedc75b0eecf}, author = {El Aichi, M.M.E.} } @conference { ISI:000310353000213, title = {A New Genetic Decoding of Linear Block Codes}, booktitle = {2012 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2012}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Tangiers, MOROCCO, MAY 10-12, 2012}, pages = {1176-1182}, publisher = {Morocco Sect MTT/AP Joint Chapter}, organization = {Morocco Sect MTT/AP Joint Chapter}, abstract = {In this paper, we present a new hard-decision decoding technique based on Genetic Algorithms, which is applicable to the more general case where the only known structure is given by the parity-check matrix H. The proposed algorithm uses the dual code in contrast to the existing genetic decoders in the literature that use the code itself. Hence, this new approach reduces the complexity of decoding the codes of high rates. We also presented a new soft-decision decoding based on Genetic Algorithms and the Chase algorithm. The simulations applied on some binary Linear Block Codes, show that the genetic decoder has the same performance as the Berlekamp-Massey Algorithm (BM) and Cardoso algorithm.}, isbn = {978-1-4673-1520-3}, author = {Azouaoui, Ahmed and Belkasmi, Mostafa}, editor = {Essaaidi, M and Zaz, Y} } @conference {Azouaoui20121176, title = {A new genetic decoding of Linear Block Codes}, booktitle = {Proceedings of 2012 International Conference on Multimedia Computing and Systems, ICMCS 2012}, year = {2012}, note = {cited By 2}, pages = {1176-1182}, doi = {10.1109/ICMCS.2012.6320254}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84869797472\&doi=10.1109\%2fICMCS.2012.6320254\&partnerID=40\&md5=61049c5a211ceb9447427a9468114bf1}, author = {Azouaoui, A. and Belkasmi, M.} } @conference {Ouacha2012305, title = {New mobility metric based on MultiPoint Relay life duration}, booktitle = {SIGMAP 2012, WINSYS 2012 - Proceedings of the International Conference on Signal Processing and Multimedia Applications and Wireless Information Networks and Systems}, year = {2012}, note = {cited By 0}, pages = {305-309}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84867639975\&partnerID=40\&md5=e94005ced33c5ad754bab9aa88330260}, author = {Ouacha, A. and Lakki, N. and Habbani, A. 
and El Abbadi, J.} } @article {ElHajTirari201270, title = {A new optimal quadratic predictor of a residual linear model in a finite population}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {42}, number = {1}, year = {2012}, note = {cited By 0}, pages = {70-74}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84867528812\&partnerID=40\&md5=3adc9b77708ed77880f3937f292856ec}, author = {El Haj Tirari, M. and El Afia, A. and Faizi, R.} } @conference {Bensiali2012, title = {Novel approach for accessible visual resources in a Web based learning environment}, booktitle = {IEEE Global Engineering Education Conference, EDUCON}, year = {2012}, note = {cited By 0}, doi = {10.1109/EDUCON.2012.6201150}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84864129251\&doi=10.1109\%2fEDUCON.2012.6201150\&partnerID=40\&md5=4f3934faee28a47ea5bff8c231d3b82d}, author = {Bensiali, S. and Ajhoun, R. and Abik, M.} } @article {Anoual201227, title = {A novel texture-based algorithm for localizing vehicle license plates}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {45}, number = {1}, year = {2012}, note = {cited By 1}, pages = {27-32}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874531292\&partnerID=40\&md5=58ad16d91d008ad97e88e70a9234486f}, author = {Anoual, H. and Fkihi, S.E. and Jilbab, A. and Aboutajdine, D.} } @conference { ISI:000310353000045, title = {PERFORMANCE ANALYSIS OF WiFi/WiMAX VERTICAL HANDOVER BASED ON MEDIA INDEPENDENT HANDOVER}, booktitle = {2012 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2012}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Tangiers, MOROCCO, MAY 10-12, 2012}, pages = {239-243}, publisher = {Morocco Sect MTT/AP Joint Chapter}, organization = {Morocco Sect MTT/AP Joint Chapter}, abstract = {Currently, there are several wireless networks deployed around the world. Examples include cellular networks, metropolitan area networks and wireless local area networks. Given the heterogeneity of the existing network technologies, the diversity of mobile applications, and user requirements, ensuring service continuity for a mobile user when changing different access network becomes a mandatory aspect. The aim of this work is to study and analyse a solution based on the IEEE802.21 standard, to enable interoperability between different networks. The assessment takes place in a context of mobility between WiFi and WiMAX networks. The network simulator NS2 is used during this study.}, isbn = {978-1-4673-1520-3}, author = {Benkaouz, Yahya and Angoma, Blaise and Erradi, Mohammed}, editor = {Essaaidi, M and Zaz, Y} } @conference { ISI:000324984400037, title = {Personalization of a hybrid integration system Creation of views to materialize based on the distribution of user queries}, booktitle = {PROCEEDINGS OF 2012 INTERNATIONAL CONFERENCE ON COMPLEX SYSTEMS (ICCS12)}, year = {2012}, note = {1st International Conference on Complex Systems (ICCS), Agadir, MOROCCO, NOV 05-06, 2012}, pages = {224-230}, abstract = {The hybrid mediator is an integration system where one part of data is queried on demand as in the virtual approach, while another part is extracted, filtered and stored in a local database. The selection of data to be stored in the mediator (to materialize) is an essential task. Different selection criteria were proposed to select among a set of candidate views, ones that will be materialized. 
However, the approaches proposed with regard to the creation of these views are rare. This paper presents a data materialization approach, in the scope of a hybrid integration system. This approach creates candidate views for materialization based on the previous user queries, and then decides which among them should be selected for materialization. Guided by the technique of clustering-merging, it is organized into two steps: (1) from the distribution of previous user queries, are extracted all data most queried by users. The obtained data are classified as candidate views for materialization. (2) then selecting which views to materialize among all the candidate views created in the first step.}, isbn = {978-1-4673-4766-2}, author = {Anter, Samir and Zellou, Ahmed and Idri, Ali}, editor = {Essaaidi, M and Nemiche, M} } @conference {Anter2012, title = {Personalization of a hybrid integration system: Creation of views to materialize based on the distribution of user queries}, booktitle = {Proceedings of 2012 International Conference on Complex Systems, ICCS 2012}, year = {2012}, note = {cited By 0}, doi = {10.1109/ICoCS.2012.6458566}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874493599\&doi=10.1109\%2fICoCS.2012.6458566\&partnerID=40\&md5=8cb54118f3a51aadd4afa9789ecdcc3a}, author = {Anter, S. and Zellou, A. and Idri, A.} } @conference { ISI:000310353000009, title = {QDGRP : A Hybrid QoS Distributed Genetic Routing Protocol for Wireless Sensor Networks}, booktitle = {2012 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2012}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Tangiers, MOROCCO, MAY 10-12, 2012}, pages = {47-52}, publisher = {Morocco Sect MTT/AP Joint Chapter}, organization = {Morocco Sect MTT/AP Joint Chapter}, abstract = {Nature-inspired routing protocols have been subject to a great research activity. Their decentralized nature and low requirements make them highly suitable for Wireless Sensor Networks (WSN) where individual nodes operate without central control and without global overview of the network status to achieve global tasks. This paper proposes a hybrid QoS routing protocol for WSN based on a customized Distributed Genetic Algorithm (DGA) that accounts for delay and energy constraints. The proposed protocol (QDGRP) is compared to AODV w.r.t several performance metrics : end-to-end delay, throughput, packet delivery ratio and energy efficiency. The realized simulations show that QDGRP outperforms AODV and manages to achieve very good performances for various deployment scenarios.}, isbn = {978-1-4673-1520-3}, author = {Koulali, Mohammed-Amine and Kobbane, Abdellatif and El Koutbi, Mohammed and Azizi, Mostafa}, editor = {Essaaidi, M and Zaz, Y} } @conference {Koulali201247, title = {QDGRP: A hybrid QoS Distributed Genetic routing protocol for Wireless Sensor Networks}, booktitle = {Proceedings of 2012 International Conference on Multimedia Computing and Systems, ICMCS 2012}, year = {2012}, note = {cited By 2}, pages = {47-52}, doi = {10.1109/ICMCS.2012.6320271}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84869852900\&doi=10.1109\%2fICMCS.2012.6320271\&partnerID=40\&md5=d66a629d14ff4cf85a93e8ce6708dbb9}, author = {Koulali, M.-A. and Kobbane, A. and El Koutbi, M. 
and Azizi, M.} } @article {8699111820120101, title = {Reduced Complexity Iterative Decoding of 3D-Product Block Codes Based on Genetic Algorithms.}, journal = {Journal of Electrical \& Computer Engineering}, year = {2012}, pages = {1 - 8}, abstract = {Two iterative decoding algorithms of 3D-product block codes (3D-PBC) based on genetic algorithms (GAs) are presented. The first algorithm uses the Chase-Pyndiah SISO, and the second one uses the list-based SISO decoding algorithm (LBDA) based on order-i reprocessing. We applied these algorithms over AWGN channel to symmetric 3D-PBC constructed from BCH codes. The simulation results show that the first algorithm outperforms the Chase-Pyndiah one and is only 1.38 dB away from the Shannon capacity limit at BER of 10-5 for BCH (31, 21, 5){\textthreesuperior} and 1.4 dB for BCH (16, 11, 4){\textthreesuperior}. The simulations of the LBDA-based GA on the BCH (16, 11, 4){\textthreesuperior} show that its performances outperform the first algorithm and is about 1.33 dB from the Shannon limit. Furthermore, these algorithms can be applied to any arbitrary 3D binary product block codes, without the need of a hard-in hardout decoder. We show also that the two proposed decoders are less complex than both Chase-Pyndiah algorithm for codes with large corr}, keywords = {Computational complexity, Computer simulation, Genetic algorithms, Iterative decoding, Performance evaluation, Three-dimensional imaging}, issn = {20900147}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=86991118\&site=ehost-live}, author = {Ahmadi, Abdeslam and Bouanani, Faissal El and Ben-Azza, Hussain and Benghabrit, Youssef} } @conference { ISI:000338165600142, title = {SOFTWARE COST ESTIMATION BY FUZZY ANALOGY FOR ISBSG REPOSITORY}, booktitle = {UNCERTAINTY MODELING IN KNOWLEDGE ENGINEERING AND DECISION MAKING}, series = {World Scientific Proceedings Series on Computer Engineering and Information Science}, volume = {7}, year = {2012}, note = {10th International Conference on Fuzzy Logic and Intelligent Technologies in Nuclear Science (FLINS), Istanbul, TURKEY, AUG 26-29, 2012}, pages = {863-868}, publisher = {Istanbul Tech Univ; Bahcesehir Univ; Belgian Nucl Res Ctr; Ghent Univ}, organization = {Istanbul Tech Univ; Bahcesehir Univ; Belgian Nucl Res Ctr; Ghent Univ}, abstract = {Software cost estimation is one of the most important and complex tasks in software project management. As a result, several techniques for estimating development effort have been suggested. Fuzzy Analogy is one of these techniques suggested to estimate project effort when it is described either by linguistic or numerical values. Based on reasoning by analogy and fuzzy logic, this technique uses fuzzy representation of software project attributes by using expert knowledge or clustering techniques. From this work, we evaluate the accuracy of this approach to estimate the software effort using the International Software Benchmarking Standards Group (ISBSG) repository.}, isbn = {978-981-4417-73-0}, author = {Idri, Ali and Amazal, Fatima Azzahra}, editor = {Kahraman, C and Kerre, EE and Bozbura, FT} } @conference {Idri2012863, title = {Software cost estimation by fuzzy analogy for ISBSG repository}, booktitle = {World Scientific Proc. 
Series on Computer Engineering and Information Science 7; Uncertainty Modeling in Knowledge Engineering and Decision Making - Proceedings of the 10th International FLINS Conf.}, volume = {7}, year = {2012}, note = {cited By 0}, pages = {863-868}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84892659716\&partnerID=40\&md5=2001b51ce67b133f56f1ca779fc56611}, author = {Idri, A. and Amazal, F.A.} } @article {7391589120120201, title = {Software Productivity: Harmonization in ISO/IEEE Software Engineering Standards.}, journal = {Journal of Software (1796217X)}, volume = {7}, number = {2}, year = {2012}, pages = {462 - 470}, abstract = {The software productivity is an important key of software quality factors. The productivity measure has become a tool for managers since it is used to compare the performance between different companies (benchmarking) and to compare the efficiency of different developers in the same company. Therefore, it allows doing strategic planning and decision making based on such measurement. A variety of international standardization bodies such as IEEE and ISO as well as software engineering researchers have proposed a set of factors which influence the software productivity attribute, and also a set of measures to evaluate it. However, there is no unique model that integrates all the software productivity best practices. The aim of this paper is to survey the available international standards and research work on software productivity and figure out the key differences in order to propose a standards-based model. Such model will include the set of quality attributes that could be used to ref}, keywords = {Benchmark problems (Computer science), Computer programmers, Decision making, IEEE 802.11 (Standard), IEEE Std. 1045, International Organization for Standardization, ISO 9126, Measurement, Measurements, Productivity Drivers, Quality Attributes, Quality Models, Software Developers Productivity, Standards}, issn = {1796217X}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=73915891\&site=ehost-live}, author = {Cheikhi, Laila and Al-Qutaish, Rafa E. and Idri, Ali} } @article {Cheikhi2012462, title = {Software productivity: Harmonization in ISO/IEEE software engineering standards}, journal = {Journal of Software}, volume = {7}, number = {2}, year = {2012}, note = {cited By 2}, pages = {462-470}, doi = {10.4304/jsw.7.2.462-470}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84857929017\&doi=10.4304\%2fjsw.7.2.462-470\&partnerID=40\&md5=55088a24a8af596776fe0e2ddf74892c}, author = {Cheikhi, L. and Al-Qutaish, R.E. and Idri, A.} } @conference {Benamar20123602, title = {Time to contact estimation on paracatadioptric cameras}, booktitle = {Proceedings - International Conference on Pattern Recognition}, year = {2012}, note = {cited By 2}, pages = {3602-3605}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874558961\&partnerID=40\&md5=ed024edcf33e2e2b82c2adf2ecc505f3}, author = {Benamar, F. and Demonceaux, C. and Fkihi, S.E. and Mouaddib, E. 
and Aboutajdine, D.} } @conference { ISI:000325229700068, title = {Towards Contract-Based Approach for Quality-Driven Service Component Architecture}, booktitle = {2012 SECOND INTERNATIONAL CONFERENCE ON INNOVATIVE COMPUTING TECHNOLOGY (INTECH)}, year = {2012}, note = {2nd International Conference on Innovative Computing Technology (INTECH), Casablanca, MOROCCO, SEP 18-20, 2012}, pages = {377-384}, publisher = {IEEE UK \& RI Consumer Elect Chapter; IEEE Broadcast Technol Chapter; IEEE}, organization = {IEEE UK \& RI Consumer Elect Chapter; IEEE Broadcast Technol Chapter; IEEE}, abstract = {Service Component Architecture (SCA) is a recent approach and an industry standard for developing complex and distributed systems. Despite the growing research work it still lacks a formal basis for handling trust and reliability of quality-driven systems. In this paper we present main techniques and models for assuring quality and trustworthiness of component-based systems in general, and then we present and justify the choice of the design by contract approach that we adopted for the following of our research about SCA-based systems. Design by Contract is an approach of software design that aims to address reliability and quality issues in software development by expressing a set of its properties and constraints.}, isbn = {978-1-4673-2679-7}, author = {Rhanoui, Maryem and El Asri, Bouchra} } @conference {Rhanoui2012377, title = {Towards contract-based approach for quality-driven Service Component Architecture}, booktitle = {2nd International Conference on Innovative Computing Technology, INTECH 2012}, year = {2012}, note = {cited By 0}, pages = {377-384}, doi = {10.1109/INTECH.2012.6457754}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874497855\&doi=10.1109\%2fINTECH.2012.6457754\&partnerID=40\&md5=b34258865186cc7e811056d11b5ead41}, author = {Rhanoui, M. and Asri, B.E.} } @conference {Faqihi2012134, title = {Towards the implementation of semantic interoperability of information systems via technique of matching: Issue and prospective}, booktitle = {2012 International Conference on Interactive Mobile and Computer Aided Learning, IMCL 2012}, year = {2012}, note = {cited By 0}, pages = {134-140}, doi = {10.1109/IMCL.2012.6396464}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84873168694\&doi=10.1109\%2fIMCL.2012.6396464\&partnerID=40\&md5=fc312da1cafe93b0fe3d989cfbcc537c}, author = {Faqihi, B. and Daoudi, N. and Ajhoun, R.} } @conference { ISI:000315346700024, title = {Towards the implementation of semantic interoperability of information systems via technique of matching: Issue and prospective}, booktitle = {2012 INTERNATIONAL CONFERENCE ON INTERACTIVE MOBILE AND COMPUTER AIDED LEARNING (IMCL)}, year = {2012}, note = {International Conference on Interactive Mobile and Computer Aided Learning (IMCL), Princess Sumaya Univ Technol (PSUT), Amman, JORDAN, NOV 06-08, 2012}, pages = {134-140}, publisher = {IEEE; Carinthia Tech Inst; IAOE; IELA; IGIP; Online Journals}, organization = {IEEE; Carinthia Tech Inst; IAOE; IELA; IGIP; Online Journals}, abstract = {It is very important first to state that the emergence of the e-commerce and e-government or the electronic libraries perform a necessity to have an access to multiple resources of information. Otherwise, these sources are not necessarily homogenous, either on the syntactic or semantic standard. 
For the semantic component, conflicts appear when the systems do not share the same interpretation of the exchanged information. To illustrate, in the field of e-learning the existing systems are completely heterogeneous, geographically remote and developed on different platforms, yet they need to collaborate, exchange, and re-use data and services while keeping the same meaning. In other words, the user (learner) must have transparent access to all the resources and information he needs, whichever system they belong to. Ensuring that the interpretation of these resources and information does not change from one system to another is the well-known issue of semantic interoperability in the information research field. This concept is identified by the ability to interpret and use exchanged data, not only its content but also its meaning. {[}01] Many solutions can be suggested by relying on the Semantic Web approach and on ontologies, by which we can describe the structure, the semantic content and the objective of a system. This detailed description permits the systems to understand the semantics of the exchanged data, facilitates their localization and ensures their integration. {[}03] Our direction in this research is to find an approach that responds to the semantic issue in the learning domain when information is exchanged between systems, so that each system can analyze the shared content against its own ontology and treat it in a coherent manner without losing its meaning. The major challenge of this approach is how to compare and identify the correspondence among the concepts of the different ontology-based systems. There are three major approaches, which we can state as follows: the integrated approach, the federated approach and the unified approach. {[}03] Our article is intended to elaborate a comparative study between these approaches, to study the benefits and limits of each approach and to make a well-founded choice. This technique might provide a good response to the problem of semantic interoperability.}, isbn = {978-1-4673-4924-6}, author = {Faqihi, B. and Daoudi, N. and Ajhoun, R.} } @article {8885929720120901, title = {Using Aspect-Oriented State Machines for Detecting and Resolving Feature Interactions.}, journal = {Computer Science \& Information Systems}, volume = {9}, number = {3}, year = {2012}, pages = {1046 - 1074}, abstract = {The article focuses on the use of aspect-oriented state machines that helps in detecting feature interactions. It mentions that a domain-specific language (DSL) is introduced to manage Finite State Machines with a pattern matching technique.
It mentions that in software product-line (SPL) engineers divides a software system into functional features through feature model.}, keywords = {aspect interactions, Aspect-oriented programming, Computer hardware description languages, Computer software, conflict detection, conflict resolution, domain-specific aspect languages, feature interactions, Finite state machines (Computer science), formal methods, Image registration, object-oriented programming, state machines}, issn = {18200214}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=88859297\&site=ehost-live}, author = {Dinkelaker, Tom and Erradi, Mohammed and Ayache, Meryeme} } @article {Dinkelaker20121045, title = {Using aspect-oriented state machines for detecting and resolving feature interactions}, journal = {Computer Science and Information Systems}, volume = {9}, number = {3}, year = {2012}, note = {cited By 2}, pages = {1045-1074}, doi = {10.2298/CSIS111216033D}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84867471411\&doi=10.2298\%2fCSIS111216033D\&partnerID=40\&md5=a033e4838afaf44fbd961002569f0da2}, author = {Dinkelaker, T. and Erradi, M. and Ayache, M.} } @article { ISI:000309649500004, title = {Using Aspect-Oriented State Machines for Detecting and Resolving Feature Interactions}, journal = {COMPUTER SCIENCE AND INFORMATION SYSTEMS}, volume = {9}, number = {3, SI}, year = {2012}, month = {SEP}, pages = {1045-1074}, abstract = {Composing different features in a software system may lead to conflicting situations. The presence of one feature may interfere with the correct functionality of another feature, resulting in an incorrect behavior of the system. In this work we present an approach to manage feature interactions. A formal model, using Finite State Machines (FSM) and Aspect-Oriented (AO) technology, is used to specify, detect and resolve features interactions. In fact aspects can resolve interactions by intercepting the events which causes troubleshoot. Also a Domain-Specific Language (DSL) was developed to handle Finite State Machines using a pattern matching technique.}, issn = {1820-0214}, doi = {10.2298/CSIS111216033D}, author = {Dinkelaker, Tom and Erradi, Mohammed and Ayache, Meryeme} } @conference {Souali2011, title = {An automatic ethical-based recommender system for e-commerce}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 2}, doi = {10.1109/ICMCS.2011.5945631}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961215460\&doi=10.1109\%2fICMCS.2011.5945631\&partnerID=40\&md5=8ab17e424b6230f9880c1381229a5999}, author = {Souali, K. and El Afia, A. and Faizi, R.} } @conference {Hafiddi2011176, title = {A context-aware service centric approach for service oriented architectures}, booktitle = {ICEIS 2011 - Proceedings of the 13th International Conference on Enterprise Information Systems}, volume = {3 ISAS}, year = {2011}, note = {cited By 4}, pages = {176-183}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84865112643\&partnerID=40\&md5=2dc55c40ac4f93da3355ed544dba2d19}, author = {Hafiddi, H. and Nassar, M. and Baidouri, H. and El Asri, B. 
and Kriouile, A.} } @conference { ISI:000310937500040, title = {A Dynamic Timestamp Discrepancy against Replay Attacks in MANET}, booktitle = {INFORMATICS ENGINEERING AND INFORMATION SCIENCE, PT IV}, series = {Communications in Computer and Information Science}, volume = {254}, year = {2011}, note = {International Conference on Informatics Engineering and Information Science (ICIEIS 2011), Univ Teknol Malaysia, Kuala Lumpur, MALAYSIA, NOV 14-16, 2011}, pages = {479+}, publisher = {Springer}, organization = {Springer}, abstract = {Mobile Ad hoc NETworks (MANETs), like traditional networks, are vulnerable to a wide range of Denial-of-Service (DoS) attacks. A Replay attack is one of them that degrade severely the MANET performance. A replay attacker performs this attack by interception and retransmission of the valid signed messages. The validation of signed messages is verified by a timestamp discrepancy fixed by sender and receiver nodes. In this paper, we propose an enhancement of the timestamp concept, to avoid replay attack, characterized by two properties. The first is dynamic because it depends on the communication duration between sender and receiver nodes. The second is rigid because it estimates approximately the maximum date when the signed message should arrive to receive node.}, isbn = {978-3-642-25482-6; 978-3-642-25483-3}, issn = {1865-0929}, author = {Enneya, Nourddine and Baayer, Aziz and Elkoutbi, Mohammed}, editor = {AbdManaf, A and Sahibuddin, S and Ahmad, R and Daud, SM and ElQawasmeh, E} } @conference {Chana2011, title = {An efficient new soft-decision decoding algorithm for binary cyclic codes}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 4}, doi = {10.1109/ICMCS.2011.5945580}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961236931\&doi=10.1109\%2fICMCS.2011.5945580\&partnerID=40\&md5=00f5f52d11fff70371c5fb428973b573}, author = {Chana, I. and Allouch, H. and Belkasmi, M.} } @article {Ouacha2011607, title = {Energy consumption of mobile intelligent system}, journal = {International Review on Computers and Software}, volume = {6}, number = {4}, year = {2011}, note = {cited By 1}, pages = {607-614}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-81255192184\&partnerID=40\&md5=b26045da71d43042643a0b6b4648f535}, author = {Ouacha, A. and Lakki, N. and Habbani, A. and Oubaha, J. and Elkoutbi, M. and Abbadi, J.E.} } @article {Ajana201119, title = {FlexRFID middleware in the supply chain: Strategic values and challenges}, journal = {International Journal of Mobile Computing and Multimedia Communications}, volume = {3}, number = {2}, year = {2011}, note = {cited By 0}, pages = {19-32}, doi = {10.4018/jmcmc.2011040102}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80052879454\&doi=10.4018\%2fjmcmc.2011040102\&partnerID=40\&md5=3256e261ddc2de34d29749a6b36fe5bf}, author = {Ajana, M.E. and Harroud, H. and Boulmalf, M. and Elkoutbi, M.} } @conference {Anwar2011188, title = {A formal approach to model composition applied to VUML}, booktitle = {Proceedings - 2011 16th IEEE International Conference on Engineering of Complex Computer Systems, ICECCS 2011}, year = {2011}, note = {cited By 7}, pages = {188-197}, doi = {10.1109/ICECCS.2011.26}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79960496037\&doi=10.1109\%2fICECCS.2011.26\&partnerID=40\&md5=90c65ef43cd980578796730f38a6b5dd}, author = {Anwar, A. and Dkaki, T. and Ebersold, S. and Coulette, B. 
and Nassar, M.} } @conference { ISI:000298664200019, title = {A Formal Approach to Model Composition Applied to VUML}, booktitle = {2011 16TH IEEE INTERNATIONAL CONFERENCE ON ENGINEERING OF COMPLEX COMPUTER SYSTEMS (ICECCS)}, year = {2011}, note = {IEEE Int Conf/Workshops on Engineering of Autonomic and Autonomous Systems/Int Conf on the Engineering of Computer-Based Systems/Int Conf on the Engineering of Complex Computer Systems, Las Vegas, NV, APR 27-29, 2011}, pages = {188-197}, publisher = {IEEE; IEEE Comp Soc Tech Comm Engn Autonomous \& Autonom Syst (TC-AAS); IEEE Comp Soc}, organization = {IEEE; IEEE Comp Soc Tech Comm Engn Autonomous \& Autonom Syst (TC-AAS); IEEE Comp Soc}, abstract = {Several approaches adopted by the software engineering community rely on the principle of multi-modeling which allows to separate concerns and to model a system as a set of less complex sub-models. Model composition is a crucial activity in Model Driven Engineering (MDE). It is particularly useful when adopting a multi-modeling approach to analyze and design software systems. In previous work, we have defined a view-based UML profile called VUML. In this paper, we describe a formal approach for model composition in which we consider the composition as an algebraic operator on the set of UML-compliant models. We specify the semantics of our composition operator by means of graph transformations. Furthermore, we present a composition scheme interpreted by a two-steps composition process based on two strategies of correspondence and merging. To illustrate our approach, we apply it to the composition of UML class models diagrams into one VUML model class diagram.}, isbn = {978-0-7695-4381-9}, doi = {10.1109/ICECCS.2011.26}, author = {Anwar, Adil and Dkaki, Taoufiq and Ebersold, Sophie and Coulette, Bernard and Anwar, Adil and Nassar, Mahmoud}, editor = {Perseil, I and Breitman, K and Sterritt, R} } @conference {Azouaoui2011, title = {A genetic algorithm to search of good double-circulant codes}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 2}, doi = {10.1109/ICMCS.2011.5945582}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961229274\&doi=10.1109\%2fICMCS.2011.5945582\&partnerID=40\&md5=f33d2b1f9eced1a4a2448d6a0986b0d1}, author = {Azouaoui, A. and Askali, M. and Belkasmi, M.} } @conference {Angoma2011101, title = {HaVe-2W3G: A vertical handoff solution between WLAN, WiMAX and 3G networks}, booktitle = {IWCMC 2011 - 7th International Wireless Communications and Mobile Computing Conference}, year = {2011}, note = {cited By 8}, pages = {101-106}, doi = {10.1109/IWCMC.2011.5982514}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80052450203\&doi=10.1109\%2fIWCMC.2011.5982514\&partnerID=40\&md5=678334f04e9317df81cc23e2cd3ea407}, author = {Angoma, B. and Erradi, M. and Benkaouz, Y. and Berqia, A. 
and Charaf Akalay, M.} } @conference { ISI:000300570200017, title = {HaVe-2W3G: A Vertical Handoff Solution between WLAN, WiMAX and 3G Networks}, booktitle = {2011 7TH INTERNATIONAL WIRELESS COMMUNICATIONS AND MOBILE COMPUTING CONFERENCE (IWCMC)}, series = {International Wireless Communications and Mobile Computing Conference}, year = {2011}, note = {7th IEEE International Wireless Communications and Mobile Computing Conference (IWCMC), Istanbul, TURKEY, JUL 04-08, 2011}, pages = {101-106}, publisher = {IEEE; IEEE Turkey sect; Huawei}, organization = {IEEE; IEEE Turkey sect; Huawei}, abstract = {The demand for the ubiquitous service is increasing due to the rapidly growing demand for increased data rates, mobile Internet and the diversity of wireless communication technologies. Also due to the challenges to interconnect heterogeneous network technologies and to offer ubiquitous services, telecommunications operators look after the best way to provide continuity of service during handover and how to give the mobile client the possibility to get the best connection anywhere and anytime. In this paper we propose an architecture and its implementation which guarantees the continuity of service during a communication in the context of heterogeneous access network technologies. The suggested solution named HaVe-2W3G (Handover Vertical WLAN WiMAX 3G) ensures a Vertical handover between heterogeneous access networks technologies: WLAN, WiMAX and 3G. A performance evaluation of such implementation is shown using a streaming application.}, isbn = {978-1-4244-9537-5}, issn = {2376-6492}, author = {Angoma, Blaise and Erradi, Mohammed and Benkaouz, Yahya and Berqia, Amine and Akalay, Mohammed Charaf} } @article {6016823720110101, title = {Interoperability Monitoring for eGovernment Service Delivery Based on Enterprise Architecture.}, journal = {Proceedings of the European Conference on Information Management \& Evaluation}, year = {2011}, pages = {169 - 180}, abstract = {Public administration has to prepare itself to deliver fully integrated eGovernment services. This delivery often requires cooperation via business processes interoperability across two or more departments. In this context, public departments and agencies need to implement interoperability using enterprise architecture techniques to structure business processes, and service oriented models to achieve their integration. Thus, it{\textquoteright}s quite interesting to adopt enterprise architecture paradigm and techniques to analyse, track and control the evolution degree of processes interoperability from the existing "as-is" state to the future "to-be" state. The present paper proposes a periodic monitoring approach based on an assessment method which considers three main aspects of interoperation: 1. Potentiality, reflecting the preparation to interoperate. The objective is to foster interoperation readiness by eliminating barriers that may obstruct the interaction. 2. 
Compatibility, referring to int}, keywords = {and eHealth, communication, Computer software, enterprise architecture, integrated public eService, Internet, Internetworking (Telecommunication), interoperability assessment, Local government, Organizational governance, periodic monitoring, Public administration, Public hospitals}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=bth\&AN=60168237\&site=ehost-live}, author = {Elmir, Badr and Alrajeh, Nabil and Bounabat, Bouchaib} } @conference { ISI:000307329300033, title = {A Learning Driven Model for ERP Software Selection Based on the Choquet Integral: Small and Medium Enterprises Context}, booktitle = {DIGITAL ENTERPRISE AND INFORMATION SYSTEMS}, series = {Communications in Computer and Information Science}, volume = {194}, year = {2011}, note = {International Conference on Digital Enterprise and Information Systems, London Metropolitan Univ, London, ENGLAND, JUL 20-22, 2011}, pages = {358-371}, publisher = {Springer}, organization = {Springer}, abstract = {Historically, Enterprise Resource Planning (ERP) systems were initially destined to large companies in order to standardize and streamline their key business processes. Recently, they have been increasingly adopted by Small and Medium Enterprises (SMEs). However, making strategic tradeoffs among the various marketplace solutions is a troublesome balance task for SMEs without the rescue of systematic decision approaches. This paper addresses the question of how to choose an ERP solution that best suits a given SME. It serves twofold objectives; firstly it defines a set of selection criteria related to SMEs{\textquoteright} context. Secondly. it presents a selection methodology based on the construction of an induced decision model through capturing the decision maker{\textquoteright}s preferences. The key contribution of this paper is the introduction of a new iterative learning based approach destined to make enlightened decisions through the consideration of interdependencies among the adopted selection criteria thanks to the Choquet integral.}, isbn = {978-3-642-22602-1}, issn = {1865-0929}, author = {Khaled, Abdelilah and Idrissi, Mohammed Abdou Janati}, editor = {Ariwa, E and ElQawasmeh, E} } @conference {ElAichi2011591, title = {Maturity models transition from eGovernment interoperability to t-government: Restyling dynamic public services through integrated transformation of service delivery}, booktitle = {Proceedings of the European Conference on e-Government, ECEG}, year = {2011}, note = {cited By 0}, pages = {591-602}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84870934201\&partnerID=40\&md5=325c84d83665729bf6b3ba713949b9f2}, author = {El Aichi, M.M.E. and El Kettani, M.D.E.-C.} } @conference { ISI:000300256400069, title = {Maturity Models Transition from eGovernment Interoperability to T-Government: Restyling Dynamic Public Services Through Integrated Transformation of Service Delivery}, booktitle = {PROCEEDINGS OF THE 11TH EUROPEAN CONFERENCE ON EGOVERNMENT}, year = {2011}, note = {11th European Conference on e-Government (ECEG), Univ Ljubljana, Fac Adm, Ljubljana, SLOVENIA, JUN 16-17, 2011}, pages = {591-602}, abstract = {Transforming public services through a global, common, measurable, achievable vision and clear objectives, the use of Information and Communication Technology and referral to services focused on the citizens needs have an important effect on economic, social and cultural life across the country that extends globally. 
The aim of this paper is to outline this part of the transformation of government services by defining the guidelines for implementation and the monitoring procedures necessary to support any maturity model, in order to ensure the successful transformation of public services in a safe, flexible and adaptable manner. The model we seek to build is based on best practices in service management, governance, enterprise architecture, and implementation of Information Technology.}, isbn = {978-1-908272-00-3}, author = {El Aichi, Mohamed Mohyi Eddine and El Kettani, Mohamed Dafir Ech-Cherif}, editor = {Klun, M and Decman, M and Jukic, T} } @conference { ISI:000298749600093, title = {Metamodels for models complete integration}, booktitle = {2011 IEEE INTERNATIONAL CONFERENCE ON INFORMATION REUSE AND INTEGRATION (IRI)}, year = {2011}, note = {12th IEEE International Conference on Information Reuse and Integration (IRI)/1st International Workshop on Issues and Challenges in Social Computing (WICSOC), Las Vegas, NV, AUG 03-05, 2011}, pages = {496-499}, publisher = {IEEE Syst, Man \& Cybernet Soc (IEEE SMC); Soc Informat Reuse \& Integrat (SIRI); IEEE}, organization = {IEEE Syst, Man \& Cybernet Soc (IEEE SMC); Soc Informat Reuse \& Integrat (SIRI); IEEE}, abstract = {Problems occur in models integration, particularly in the comparison phase of the integration. Previous approaches have not correctly handled the semantic integration. We propose a complete approach for integrating UML models. First, we provide a comparison approach incorporating syntactic, structural and semantic aspects in order to detect any correspondence between the models to compare, and secondly, a semantic approach to integrate the compared models. For this purpose, we use a domain ontology. In this paper, our contribution is the proposition of the different metamodels needed in the comparison and the integration phases.}, isbn = {978-1-4577-0966-1}, author = {Benabdellah Chaouni, Samia and Fredj, Mounia and Mouline, Salma}, editor = {Alhajj, R and Joshi, J and Shyu, ML} } @conference {Hafiddi2011, title = {A model driven approach for context-aware services development}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 6}, doi = {10.1109/ICMCS.2011.5945628}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961237316\&doi=10.1109\%2fICMCS.2011.5945628\&partnerID=40\&md5=b09e1287fcbf49b9b8cc1fb6ff702c95}, author = {Hafiddi, H. and Baidouri, H. and Nassar, M. and Asri, B.E. and Kriouile, A.} } @conference {Benhaddi2011116, title = {Model-driven approach for user-centric mashuped SOA}, booktitle = {ICEIS 2011 - Proceedings of the 13th International Conference on Enterprise Information Systems}, volume = {4 SAIC}, number = {HCI/-}, year = {2011}, note = {cited By 0}, pages = {116-123}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84865131265\&partnerID=40\&md5=89e560fddfcd6a080a15a904ebd4e8a9}, author = {Benhaddi, M. and Baina, K. and Abdelwahed, E.H.} } @conference {Souali2011, title = {A new recommender system for e-learning environments}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 4}, doi = {10.1109/ICMCS.2011.5945630}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961232743\&doi=10.1109\%2fICMCS.2011.5945630\&partnerID=40\&md5=4d89bd30150afdd564b7caedffba4902}, author = {Souali, K. and El Afia, A. and Faizi, R. 
and Chiheb, R.} } @conference {Bouirouga2011, title = {Recognition of adult video by combining skin detection features with motion information}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 1}, doi = {10.1109/ICMCS.2011.5945570}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961239374\&doi=10.1109\%2fICMCS.2011.5945570\&partnerID=40\&md5=96e36065eca2afa2f2e3a18e0d2b4fe5}, author = {Bouirouga, H. and Lrit, S.E. and Jilbab, A. and Aboutajdine, D.} } @article {Chiheb201169, title = {Using objective online testing tools to assess students{\textquoteright} learning: Potentials and limitations}, journal = {Journal of Theoretical and Applied Information Technology}, volume = {24}, number = {1}, year = {2011}, note = {cited By 1}, pages = {69-72}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79951845409\&partnerID=40\&md5=8dfb6d5a464b4a730925a6ff3009b592}, author = {Chiheb, R. and Faizi, R. and El Afia, A.} } @conference {Elbhiri2011, title = {UWB based maximizing network liftime with route selection strategies for wireless sensor networks}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 0}, doi = {10.1109/ICMCS.2011.5945636}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961216854\&doi=10.1109\%2fICMCS.2011.5945636\&partnerID=40\&md5=3640a80cc17beb1f99c1d6741aece8e9}, author = {Elbhiri, B. and Saadane, R. and Fkihi, S.E. and Aboutajdine, D. and Wahbi, M.} } @conference {Anoual2011, title = {Vehicle license plate detection in images}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 3}, doi = {10.1109/ICMCS.2011.5945680}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961227182\&doi=10.1109\%2fICMCS.2011.5945680\&partnerID=40\&md5=0640cc288461c0ff15b817b0c5d2a10b}, author = {Anoual, H. and Fkihi, S.E. and Jilbab, A. and Aboutajdine, D.} } @conference {ElHaroussi2011, title = {VHDL design and FPGA implementation of weighted majority logic decoders}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 0}, doi = {10.1109/ICMCS.2011.5945599}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961233273\&doi=10.1109\%2fICMCS.2011.5945599\&partnerID=40\&md5=07df2e3fcbba980a7e899b84c1e869ff}, author = {El Haroussi, M. and Ayoub, F. and Belkasmi, M.} } @conference {Elbhiri2010, title = {Clustering in Wireless Sensor Networks based on near Optimal bi-partitions}, booktitle = {6th Euro NF Conference on Next Generation Internet, NGI 2010 - Proceedings}, year = {2010}, note = {cited By 10}, doi = {10.1109/NGI.2010.5534469}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77956440742\&doi=10.1109\%2fNGI.2010.5534469\&partnerID=40\&md5=bdb3af7a0347d03735950c66675c8253}, author = {Elbhiri, B. and Fkihi, S.E. and Saadane, R. and Aboutajdine, D.} } @conference {Elbhiri2010, title = {Developed Distributed Energy-Efficient Clustering (DDEEC) for heterogeneous wireless sensor networks}, booktitle = {2010 5th International Symposium on I/V Communications and Mobile Networks, ISIVC 2010}, year = {2010}, note = {cited By 2}, doi = {10.1109/ISVC.2010.5656252}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78651495359\&doi=10.1109\%2fISVC.2010.5656252\&partnerID=40\&md5=04c6f29644071a785006aab6b93c9c27}, author = {Elbhiri, B. and Rachid, S. 
and El Fkihi, S. and Aboutajdine, D.} } @conference {Aouinatou2010, title = {Efficient anonymity for cock{\textquoteright}s scheme}, booktitle = {2010 5th International Symposium on I/V Communications and Mobile Networks, ISIVC 2010}, year = {2010}, note = {cited By 0}, doi = {10.1109/ISVC.2010.5656188}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78651517817\&doi=10.1109\%2fISVC.2010.5656188\&partnerID=40\&md5=3716f9f5370c27dbe57da8eccadc6063}, author = {Aouinatou, R. and Belkasmi, M.} } @conference {Anoual2010, title = {Features extraction for text detection and localization}, booktitle = {2010 5th International Symposium on I/V Communications and Mobile Networks, ISIVC 2010}, year = {2010}, note = {cited By 1}, doi = {10.1109/ISVC.2010.5656284}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78651516449\&doi=10.1109\%2fISVC.2010.5656284\&partnerID=40\&md5=7432f9409c4d2e3bd7a3aa3a78c5865a}, author = {Anoual, H. and Aboutajdine, D. and Elfkihi, S. and Jilbab, A.} } @article {Ayoub20102021, title = {Iterative decoding of generalized parallel concatenated OSMLD codes}, journal = {Applied Mathematical Sciences}, volume = {4}, number = {41-44}, year = {2010}, note = {cited By 4}, pages = {2021-2038}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77957011978\&partnerID=40\&md5=cbd626b2d840f6a619631feb064ba577}, author = {Ayoub, F. and Belkasmi, M. and Chana, I.} } @article {Anoual2010157, title = {New approach based on texture and geometric features for text detection}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {6134 LNCS}, year = {2010}, note = {cited By 0}, pages = {157-164}, doi = {10.1007/978-3-642-13681-8_19}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79956278929\&doi=10.1007\%2f978-3-642-13681-8_19\&partnerID=40\&md5=c16c63472198a6944f7f472697a0cc7c}, author = {Anoual, H. and El Fkihi, S. and Jilbab, A. and Aboutajdine, D.} } @conference {Rouijel2010, title = {A new approach for wireless communication systems based on IDMA technique}, booktitle = {2010 5th International Symposium on I/V Communications and Mobile Networks, ISIVC 2010}, year = {2010}, note = {cited By 0}, doi = {10.1109/ISVC.2010.5656727}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78651500725\&doi=10.1109\%2fISVC.2010.5656727\&partnerID=40\&md5=b456d23b9a760f866b87f12544c1ba42}, author = {Rouijel, A. and Nsiri, B. and Faqihi, A. and Aboutajdine, D.} } @conference { ISI:000289216200022, title = {A New Task Engineering Approach for Workflow Access Control}, booktitle = {COMPUTATIONAL INTELLIGENCE IN SECURITY FOR INFORMATION SYSTEMS 2010}, series = {Advances in Intelligent and Soft Computing}, volume = {85}, year = {2010}, note = {3rd International Conference on Computational Intelligence in Security for Information Systems, Leon, SPAIN, NOV 11-12, 2010}, pages = {203-212}, publisher = {Junta Castilla Leon; Supercomputing Ctr Castilla Leon; Univ Leon; IEEE SECCION ESPANA; IEEE Syst Man \& Cybernet Spanish Chapter; MIR Lab; Int Fed Comp Logic}, organization = {Junta Castilla Leon; Supercomputing Ctr Castilla Leon; Univ Leon; IEEE SECCION ESPANA; IEEE Syst Man \& Cybernet Spanish Chapter; MIR Lab; Int Fed Comp Logic}, abstract = {Security and particularly Access control is a crucial issue for workflow management systems (WFMS). RBAC (Role based Access Control) model seems to be suitable for enforcing access control policies in such systems. 
However, without an effective role engineering process -at administration time- that aims to reduce conflicting situations, using RBAC could be more complex and less useful. In addition to role engineering, a {\textquoteleft}task engineering{\textquoteright} process -at run time- could be useful to allow the satisfaction of access control constraints even in critical situations. In this paper, we identify task engineering as a process to examine the granularity of each workflow{\textquoteright}s task in a way to meet -at run time- the main workflow access control requirements, precisely the least privilege and separation of duties principles. Then, we propose a task engineering approach to improve security enforcement in WFMS. This approach uses an intelligent method, namely the Constraint Satisfaction Problem (CSP) formulation and resolution method.}, isbn = {978-3-642-16625-9}, issn = {1867-5662}, author = {El Bakkali, Hanan and Hatim, Hamid and Berrada, Ilham}, editor = {Herrero, A and Corchado, E and Redondo, C and Alonso, A} } @article {4980748920100419, title = {Stacked complementary metasurfaces for ultraslow microwave metamaterials.}, journal = {Applied Physics Letters}, volume = {96}, number = {16}, year = {2010}, pages = {164103}, abstract = {We have experimentally realized at microwaves a dual-band ultraslow regime by constructing a metamaterial based upon the alternative stack of conventional- and complementary-split-ring-resonators-surfaces. The group delay reaches values more than two orders of magnitude larger than those obtained when the electromagnetic wave propagates through the same thickness in free-space. The ultraslow waves have been initially predicted by a numerical eigenmode analysis and finite-integration frequency domain simulations. Such ultraslow modes can be integrated into free-space technology for spatial delay lines, traveling wave amplifiers as well as sensors, due to the enhanced interaction between different beams or between radiation and matter. [ABSTRACT FROM AUTHOR]}, keywords = {Electromagnetic waves, Metamaterials, Microwaves, Resonators, Simulation methods \& models}, issn = {00036951}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=49807489\&site=ehost-live}, author = {Navarro-C{\'\i}a, M. and Aznabet, M. and Beruete, M. and Falcone, F. and El Mrabet, O. and Sorolla, M. and Essaaidi, M.} } @conference { ISI:000271458300071, title = {Code Generation in VUML Profile: A Model Driven Approach}, booktitle = {2009 IEEE/ACS INTERNATIONAL CONFERENCE ON COMPUTER SYSTEMS AND APPLICATIONS, VOLS 1 AND 2}, year = {2009}, note = {7th ACS/IEEE International Conference on Computer Systems and Applications (AICCSA-09), Rabat, MOROCCO, MAY 10-OCT 13, 2009}, pages = {412-419}, publisher = {ACS; IEEE}, organization = {ACS; IEEE}, abstract = {VUML is a view-based analysis/design method offering a formalism (UML profile) and a process to model software systems through objects and views. The main extension to UML is the multiview class whose goal is to store and deliver information according to the user{\textquoteright}s viewpoint. VUML supports the dynamic change of viewpoints and offers mechanisms to describe view dependencies. On the semantics side, the VUML meta-model extends the UML one with OCL rules. VUML also proposes a generic implementation pattern to generate the object code corresponding to a VUML class diagram. The proposed pattern uses delegation, the handle technique and polymorphism to implement views. 
In this paper, we present a model driven implementation of this pattern, which integrates the multi-user aspect and supports Java language. We propose a code generation technique that respects the pattern and uses the MDA approach. We specify transformations as a collection of rules implemented in ATL. The proposal is illustrated by a simple example.}, isbn = {978-1-4244-3807-5}, doi = {10.1109/AICCSA.2009.5069358}, author = {Nassar, Mahmoud and Anwar, Adil and Ebersold, Sophie and Elasri, Bouchra and Coulette, Bernard and Kriouile, Abdelaziz} } @conference {Nassar2009412, title = {Code generation in VUML profile: A model driven approach}, booktitle = {2009 IEEE/ACS International Conference on Computer Systems and Applications, AICCSA 2009}, year = {2009}, note = {cited By 4}, pages = {412-419}, doi = {10.1109/AICCSA.2009.5069358}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-70349911813\&doi=10.1109\%2fAICCSA.2009.5069358\&partnerID=40\&md5=e4b947f9a88ea213061c85e81691b486}, author = {Nassar, M. and Anwar, A. and Ebersold, S. and Elasri, B. and Coulette, B. and Kriouile, A.} } @conference {Kenzi20099, title = {Engineering adaptable service oriented systems: A model driven approach}, booktitle = {IEEE International Conference on Service-Oriented Computing and Applications, SOCA{\textquoteright} 09}, year = {2009}, note = {cited By 0}, pages = {9-16}, doi = {10.1109/SOCA.2009.5410473}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77950275155\&doi=10.1109\%2fSOCA.2009.5410473\&partnerID=40\&md5=3623db8d3a57913ef5e70a14c4782c49}, author = {Kenzi, A. and El Asri, B. and Nassar, M. and Kriouile, A.} } @article {Khafaji200923, title = {Impact of human bodies on millimetric bands propagation using ray tracing technique}, journal = {Physical and Chemical News}, volume = {50}, year = {2009}, note = {cited By 0}, pages = {23-31}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-76749122475\&partnerID=40\&md5=b6be17acabeae92674dae2050eac6732}, author = {Khafaji, A. and Saadane, R. and El Abbadi, J. and Belkasmi, M. and Farchi, A.} } @conference {Kenzi2009404, title = {A model driven framework for multiview service oriented system development}, booktitle = {2009 IEEE/ACS International Conference on Computer Systems and Applications, AICCSA 2009}, year = {2009}, note = {cited By 6}, pages = {404-411}, doi = {10.1109/AICCSA.2009.5069357}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-70349932401\&doi=10.1109\%2fAICCSA.2009.5069357\&partnerID=40\&md5=a9cadf073eddaaf45fb3c1676e6acd84}, author = {Kenzi, A. and El Asri, B. and Nassar, M. and Kriouile, A.} } @conference { ISI:000271458300070, title = {A model driven framework for multiview service oriented system development}, booktitle = {2009 IEEE/ACS INTERNATIONAL CONFERENCE ON COMPUTER SYSTEMS AND APPLICATIONS, VOLS 1 AND 2}, year = {2009}, note = {7th ACS/IEEE International Conference on Computer Systems and Applications (AICCSA-09), Rabat, MOROCCO, MAY 10-OCT 13, 2009}, pages = {404-411}, publisher = {ACS; IEEE}, organization = {ACS; IEEE}, abstract = {The challenge in the development of complex systems is to cope with the dynamic nature of the user{\textquoteright}s profile, especially when such systems are exposed to the large public through the Internet. In Service oriented computing (SOC), a service may interact with several types of service clients. The central problem is, therefore, how to model the multidimensional aspect of service clients{\textquoteright} needs and requirements. 
To tackle this problem, we propose in this paper the concept of the multiview service as a first class modeling entity that permits the representation of the needs and requirements of end-users by separating their concerns. Such adaptable services must carefully be analyzed and designed. However, current works on service oriented analysis and design (SOAD) largely focus on defining processes to develop business processes and services without considering user-aware variability. In the present paper we present an MDA framework for a muliview SOAD process for the development of highly adaptable services. We specify transformations as a collection of rules implemented in ATL. The proposal is illustrated by a simple example.}, isbn = {978-1-4244-3807-5}, doi = {10.1109/AICCSA.2009.5069357}, author = {Kenzi, Adil and El Asri, Bouchra and Nassar, Mahmoud and Kriouile, Abdelaziz} } @conference { ISI:000267425300017, title = {Multiview Components for User-Aware Web Services}, booktitle = {ENTERPRISE INFORMATION SYSTEMS-BK}, series = {Lecture Notes in Business Information Processing}, volume = {24}, year = {2009}, note = {11th International Conference on Enterprise Information Systems, Milan, ITALY, MAY 06-10, 2009}, pages = {196-207}, publisher = {Inst Syst \& Technol Informat, Control \& Commun; AAAI; ACM SIGMIS; Japanese IEICE SWIM; Workflow Management Coalit}, organization = {Inst Syst \& Technol Informat, Control \& Commun; AAAI; ACM SIGMIS; Japanese IEICE SWIM; Workflow Management Coalit}, abstract = {Component based software (CBS) intends to meet the need of reusability and productivity. Web service technology leads to systems interoperability. This work addresses the development of CBS using web services technology. Undeniably, web service may interact with several types of service clients. The central problem is, therefore, how to handle the multidimensional aspect of service clients{\textquoteright} needs and requirements. To tackle this problem, we propose the concept of multiview component as a first class modelling entity that allows the capture of the various needs of service clients by separating their functional concerns. In this paper, we propose a model driven approach for the development of user-aware web services on the basis of the multiview component concept. So, we describe how multiview component based PIM are transformed into two PSMs for the purpose of the automatic generation of both the user-aware web services description and implementation. We specify transformations as a collection of transformation rules implemented using ATL as a model transformation language.}, isbn = {978-3-642-01346-1}, issn = {1865-1348}, author = {El Asri, Bouchra and Kenzi, Adil and Nassar, Mahmoud and Kriouile, Abdelaziz and Barrahmoune, Abdelaziz}, editor = {Filipe, J and Cordeiro, J} } @article {ElAsri2009196, title = {Multiview components for user-aware web services}, journal = {Lecture Notes in Business Information Processing}, volume = {24 LNBIP}, year = {2009}, note = {cited By 3}, pages = {196-207}, doi = {10.1007/978-3-642-01347-8_17}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-65949111093\&doi=10.1007\%2f978-3-642-01347-8_17\&partnerID=40\&md5=fa55771c0bc928fcc7a332af2abc8afc}, author = {El Asri, B. and Kenzi, A. and Nassar, M. and Kriouile, A. 
and Barrahmoune, A.} } @article {Abik20094, title = {Normalization and personalization of learning situations: NPLS}, journal = {International Journal of Emerging Technologies in Learning}, volume = {4}, number = {2}, year = {2009}, note = {cited By 6}, pages = {4-10}, doi = {10.3991/ijet.v4i2.818}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79960336337\&doi=10.3991\%2fijet.v4i2.818\&partnerID=40\&md5=07fc92375360ec9d618ca959c62d07bb}, author = {Abik, M. and Ajhoun, R.} } @conference {ElAsri20091, title = {Vers une architecture MVSOA pour la mise en oeuvre des composants multivue}, booktitle = {CAL 2009 - 3rd Conference Francophone sur les Architectures Logicielles}, year = {2009}, note = {cited By 0}, pages = {1-16}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874262952\&partnerID=40\&md5=259ee2685304602210abdadb34836420}, author = {El Asri, B. and Kenzi, A. and Nassar, M. and Kriouile, A.} } @conference {Asmaa2009171, title = {WiebMat, a new information extraction systemen}, booktitle = {Proceedings of the IADIS European Conference on Data Mining 2009, ECDM{\textquoteright}09 Part of the IADIS Multi Conference on Computer Science and Information Systems, MCCSIS 2009}, year = {2009}, note = {cited By 0}, pages = {171-173}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77955642240\&partnerID=40\&md5=814746990e197f7a2b9c287d94affb40}, author = {Asmaa, E.O. and Aboutajdine, D. and Aziz, D.} } @article {ElAfia2008157, title = {Asymptotic analysis of the trajectories of the logarithmic barrier algorithm without constraint qualifications}, journal = {RAIRO - Operations Research}, volume = {42}, number = {2}, year = {2008}, note = {cited By 0}, pages = {157-198}, doi = {10.1051/ro:2008008}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-44349158950\&doi=10.1051\%2fro\%3a2008008\&partnerID=40\&md5=85dc093209f6926a9de375305dc031bc}, author = {El Afia, A. and Benchakroun, A. and Dussault, J.-P. and El Yassini, K.} } @article { ISI:000255967900006, title = {Asymptotic analysis of the trajectories of the logarithmic barrier algorithm without constraint qualifications}, journal = {RAIRO-OPERATIONS RESEARCH}, volume = {42}, number = {2}, year = {2008}, note = {4th International Conference on Operational Research, Cadi Ayyad Univ, Marrakech, MOROCCO, MAY 22-26, 2005}, month = {APR-JUN}, pages = {157-198}, abstract = {In this paper, we study the differentiability of the trajectories of the logarithmic barrier algorithm for a nonlinear program when the set Lambda{*} of the Karush-Kuhn-Tuckermultiplier vectors is empty owing to the fact that the constraint qualifications are not satisfied.}, issn = {0399-0559}, doi = {10.1051/ro:2008008}, author = {El Afia, A. and Benchakroun, A. and Dussault, J. -P. and El Yassini, K.} } @conference {Berbia2008667, title = {On the decoding of convolutional codes using genetic algorithms}, booktitle = {Proceedings of the International Conference on Computer and Communication Engineering 2008, ICCCE08: Global Links for Human Development}, year = {2008}, note = {cited By 1}, pages = {667-671}, doi = {10.1109/ICCCE.2008.4580688}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-52249083280\&doi=10.1109\%2fICCCE.2008.4580688\&partnerID=40\&md5=8a49644ba289981af8e55b689e45313e}, author = {Berbia, H. and Belkasmi, M. and Elbouanani, F. 
and Ayoub, F.} } @conference { ISI:000259298000004, title = {Fairness and QoS in ad-hoc networks}, booktitle = {2008 IEEE 67TH VEHICULAR TECHNOLOGY CONFERENCE-SPRING, VOLS 1-7}, series = {IEEE Vehicular Technology Conference Proceedings}, year = {2008}, note = {67th IEEE Vehicular Technology Conference, Marina Bay, SINGAPORE, MAY 11-14, 2008}, pages = {16+}, publisher = {IEEE}, organization = {IEEE}, abstract = {IEEE 802.11 is a standard for Wireless Local Area Networks. Recent works show that this standard has bad performances in ad-hoc mode. In this paper, we analyze the algorithm used by the standard to manage contentions and propose an alternative algorithm to resolve fairness issues observed in the Hidden terminal scenario. Using., NS-2 simulator we show that our algorithm has better throughput management based on fairness between nodes under QoS constraints required by each node.}, isbn = {978-1-4244-1644-8}, issn = {1550-2252}, doi = {10.1109/VETECS.2008.16}, author = {Berqia, Amine and Angoma, Blaise} } @conference {Berqia200816, title = {Fairness and QoS in ad-hoc networks}, booktitle = {IEEE Vehicular Technology Conference}, year = {2008}, note = {cited By 2}, pages = {16-20}, doi = {10.1109/VETECS.2008.16}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-47749084749\&doi=10.1109\%2fVETECS.2008.16\&partnerID=40\&md5=4f700df4d5db788cc77adca79f873bb9}, author = {Berqia, A. and Angoma, B. and Mikou, N. and Dehbi, Y.} } @conference { ISI:000261578000113, title = {Functional Equivalence between Radial Basis Function Neural Networks and Fuzzy Analogy in Software Cost Estimation}, booktitle = {2008 3RD INTERNATIONAL CONFERENCE ON INFORMATION AND COMMUNICATION TECHNOLOGIES: FROM THEORY TO APPLICATIONS, VOLS 1-5}, year = {2008}, note = {3rd International Conference on Information and Communication Technologies, Damascus, SYRIA, APR 07-11, 2008}, pages = {615+}, abstract = {We show in this paper the equivalence between the radial basis function networks and Fuzzy analogy in the field of software cost estimation. We prove that under weak conditions, the three layers of RBFN are functionally equivalent to the three steps of fuzzy analogy. This functional equivalence implies that advances in each literature, such new learning rules or new similarity measures, can be applied to both models directly. Furthermore, this equivalence can help us to provide a natural interpretation of cost estimation models based on RBFN.}, isbn = {978-1-4244-1751-3}, author = {Idri, Ali and Zakrani, Abdelali and Abran, Alain} } @conference {Idri2008, title = {Functional equivalence between radial basis function neural networks and Fuzzy analogy in software cost estimation}, booktitle = {2008 3rd International Conference on Information and Communication Technologies: From Theory to Applications, ICTTA}, year = {2008}, note = {cited By 0}, doi = {10.1109/ICTTA.2008.4530015}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-49149083210\&doi=10.1109\%2fICTTA.2008.4530015\&partnerID=40\&md5=662843c9cdc63dbc94ac6b4d87307bb2}, author = {Idri, A. and Zakrani, A. 
and Abran, A.} } @conference {Idri2008576, title = {Fuzzy radial basis function neural networks for web applications cost estimation}, booktitle = {Innovations{\textquoteright}07: 4th International Conference on Innovations in Information Technology, IIT}, year = {2008}, note = {cited By 0}, pages = {576-580}, doi = {10.1109/IIT.2007.4430367}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-50249168929\&doi=10.1109\%2fIIT.2007.4430367\&partnerID=40\&md5=ee74c556489187d35257cf7c8b48c2c0}, author = {Idri, A. and Zakrani, A. and Elkoutbi, M. and Abran, A.} } @conference {Kenzi2008, title = {Multi-functional service oriented system development for user-driven adaptability}, booktitle = {2008 3rd International Conference on Information and Communication Technologies: From Theory to Applications, ICTTA}, year = {2008}, note = {cited By 1}, doi = {10.1109/ICTTA.2008.4530324}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-49149094527\&doi=10.1109\%2fICTTA.2008.4530324\&partnerID=40\&md5=b8973d451157200b212f00e1175dfa0a}, author = {Kenzi, A. and El Asri, B. and Nassar, M. and Kriouile, A.} } @conference { ISI:000261578000263, title = {Multi-functional service oriented system development for user-driven adaptability}, booktitle = {2008 3RD INTERNATIONAL CONFERENCE ON INFORMATION AND COMMUNICATION TECHNOLOGIES: FROM THEORY TO APPLICATIONS, VOLS 1-5}, year = {2008}, note = {3rd International Conference on Information and Communication Technologies, Damascus, SYRIA, APR 07-11, 2008}, pages = {1431-1437}, abstract = {Modern enterprises need to respond effectively and pertinently to users{\textquoteright} needs in competitive markets. In order to cope with the dynamic nature of user profiles, we propose, in this paper, a Multi-Functional Service oriented system development. While the concept of a service is not new in itself, the multi-functional service provides a convenient new concept which addresses separate properties corresponding to different user business domains, while respecting information pertinence and access right definitions. We therefore focus on user-driven adaptability of service components. Thus, we present the multi-functional service concept and give a process to identify the common and specific interests of services for different users through use cases. We then continue our process to design and deploy such services in the context of model driven architecture.}, isbn = {978-1-4244-1751-3}, author = {Kenzi, Adil and El Asri, Bouchra and Nassar, Mahmoud and Kriouile, Abdelaziz} } @conference {Kenzi200838, title = {The multiview service : A new concept for the development of adaptable Service Oriented systems}, booktitle = {Proceedings of the 4th IEEE International Symposium on Service-Oriented System Engineering, SOSE 2008}, year = {2008}, note = {cited By 2}, pages = {38-43}, doi = {10.1109/SOSE.2008.31}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-62249157099\&doi=10.1109\%2fSOSE.2008.31\&partnerID=40\&md5=1e64b5ddf0ad5ddb3c8491f8544409a9}, author = {Kenzi, A. and Asri, B.E. and Nassar, M. and Kriouile, A.} } @conference {Anwar2008360, title = {A QVT-based approach for model composition application to the VUML profile}, booktitle = {ICEIS 2008 - Proceedings of the 10th International Conference on Enterprise Information Systems}, volume = {2 ISAS}, year = {2008}, note = {cited By 3}, pages = {360-367}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-55849113397\&partnerID=40\&md5=5f1c1fa60105fdd355ccff73f9eb30e4}, author = {Anwar, A. and Ebersold, S. and Nassar, M. 
and Coulette, B. and Kriouile, A.} } @conference { ISI:000261578001077, title = {A Recent Survey on Key Management Schemes in MANET}, booktitle = {2008 3RD INTERNATIONAL CONFERENCE ON INFORMATION AND COMMUNICATION TECHNOLOGIES: FROM THEORY TO APPLICATIONS, VOLS 1-5}, year = {2008}, note = {3rd International Conference on Information and Communication Technologies, Damascus, SYRIA, APR 07-11, 2008}, pages = {2031-2036}, abstract = {An ad hoe mobile network (MANET), is a collection of wireless mobile hosts that form a temporary network without the aid of any centralized administration or support. In such a network, each mobile node operates not only as a host but also as a router, forwarding packets for other mobile nodes in the network that may be multiple hops away from each other. With those network Characteristics, security has become a primary concern for researchers to meet scientific challenges to market opportunities in term or confidentiality, authentication, integrity, availability, access control, and non-repudiation. In the same way and as a powerful tool in achieving security, the Key Management becomes a corner stone in MANET security by proposing an appropriate secure schema for handling cryptographic keying materials. The Key Management scope includes key generation, key distribution, and key maintenance. In this paper, we aim to evaluate and to present a recent overview on different research works on Key Management in MANETs.}, isbn = {978-1-4244-1751-3}, author = {Aziz, Baayer and Nourdine, Enneya and Mohamed, El Koutbi} } @conference { ISI:000259298600002, title = {Software cost estimation models using Radial Basis Function Neural Networks}, booktitle = {SOFTWARE PROCESS AND PRODUCT MEASUREMENT}, series = {Lecture Notes in Computer Science}, volume = {4895}, year = {2008}, note = {Joint Meeting of the International Workshop on Software Measurement (IWSM)/International Conference on Software Process and Product Measurement (MENSURA), Palma de Mallorca, SPAIN, NOV 05-07, 2007}, pages = {21+}, abstract = {Radial Basis Function Neural Networks (RBFN) have been recently studied due to their qualification as an universal function approximation. This paper investigates the use of RBF neural networks for software cost estimation, The focus of this study is on the design of these networks, especially their middle layer composed of receptive fields, using two clustering techniques: the C-means and the APC-III algorithms. A comparison between a RBFN using C-means and a RBFN using APC-III, in terms of estimates accuracy, is hence presented. This study uses the COCOMO{\textquoteright}81 dataset and data on Web applications from the Tukutuku database.}, isbn = {978-3-540-85552-1}, issn = {0302-9743}, author = {Idri, Ali and Zahi, Azeddine and Mendes, Emilia and Zakrani, Abdelali}, editor = {CuadradoGallego, JJ and Braungarten, R and Dumke, RR and Abran, A} } @conference {Ba{\"\i}na200831, title = {Strategic business/IT alignment using goal models}, booktitle = {CEUR Workshop Proceedings}, volume = {336}, year = {2008}, note = {cited By 2}, pages = {31-43}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84875194981\&partnerID=40\&md5=9da7e48abcceaa5f0d8cee4b42cce2e5}, author = {Baina, S. and Ansias, P.-Y. and Petit, M. 
and Castiaux, A.} } @conference {Anwar200883, title = {Towards a generic approach for model composition}, booktitle = {Proceedings - The 3rd International Conference on Software Engineering Advances, ICSEA 2008, Includes ENTISY 2008: International Workshop on Enterprise Information Systems}, year = {2008}, note = {cited By 2}, pages = {83-90}, doi = {10.1109/ICSEA.2008.38}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-57649211986\&doi=10.1109\%2fICSEA.2008.38\&partnerID=40\&md5=2fddbb103e8241a60cdfcc637896b79e}, author = {Anwar, A. and Ebersold, S. and Nassar, M. and Coulette, B. and Kriouile, A.} } @article { ISI:000208118700002, title = {Adjustable SAD matching algorithm using frequency domain}, journal = {JOURNAL OF REAL-TIME IMAGE PROCESSING}, volume = {1}, number = {4}, year = {2007}, month = {JUL}, pages = {257-265}, abstract = {Fast Fourier transforms (FFTs) which are O(N logN) algorithms to compute a discrete Fourier transform (DFT) of size N have been called one of the ten most important algorithms of the twentieth century. However, even though many algorithms have been developed to speed up the computation the sum of absolute difference (SAD) matching, they are exclusively designed in the spatial domain. In this paper, we propose a fast frequency algorithm to speed up the process of (SAD) matching. We use a new approach to approximate the SAD metric by cosine series which can be expressed in correlation terms. These latter can be computed using FFT algorithms. Experimental results demonstrate the effectiveness of our method when using only the first correlation terms for block and template matching in terms of accuracy and speed. The proposed algorithm is suitable for software implementations and has a deterministic execution time unlike the existing fast algorithms for SAD matching.}, issn = {1861-8200}, doi = {10.1007/s11554-007-0026-0}, author = {Essannouni, F. and Haj Thami, R. Oulad and Aboutajdine, D. and Salam, A.} } @article { ISI:000256074600006, title = {Fast exhaustive block-based motion vector estimation algorithm using FFT}, journal = {ARABIAN JOURNAL FOR SCIENCE AND ENGINEERING}, volume = {32}, number = {2C}, year = {2007}, note = {18th International Conference on Microelectronics (ICM 2006), King Fahd Univ Petr \& Minerals, Dhahran, SAUDI ARABIA, DEC 16-18, 2006}, month = {DEC}, pages = {61-74}, abstract = {In video processing, block motion estimation represents a CPU-intensive task. For this reason, many fast algorithms have been developed to improve searching and matching phases. These methods however work generally in the spatial domain. In this paper we propose to benefit from speed of the available FFT algorithms. The proposed algorithm computes the motion vector for two blocks using simultaneous two-dimensional cross correlations and use again the FFT to compute the sum square blocks in the frequency domain. Simulation results show that the proposed algorithm gives an optimal SSD (sum square differences) full search results while having a computational cost inferior to the classical fast block matching algorithms.}, issn = {1319-8025}, author = {Essannouni, F. and Thami, R. Oulad Haj and Aboutajdine, D. and Salam, A.} } @article {2489083820070426, title = {Fast L4 template matching using frequency domain.}, journal = {Electronics Letters}, volume = {43}, number = {9}, year = {2007}, pages = {507 - 508}, abstract = {Fast and simple L4 norm computation for translational template matching is presented. 
The proposed method is based on expressing the L4 norm in terms of two cross-correlations. Speed is obtained from computing the correlations in the frequency domain using FFT algorithms. [ABSTRACT FROM AUTHOR]}, keywords = {Algebra, Algorithms, Correlation (Statistics), Mathematical statistics, Pattern recognition systems}, issn = {00135194}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=24890838\&site=ehost-live}, author = {Essannouni, F. and Oulad Haj Thami, R. and Aboutajdine, D. and Salam, A.} } @article { ISI:000246875600015, title = {Fast L4 template matching using frequency domain}, journal = {ELECTRONICS LETTERS}, volume = {43}, number = {9}, year = {2007}, month = {APR 26}, pages = {507-508}, abstract = {Fast and simple L4 norm computation for translational template matching is presented. The proposed method is based on expressing the L4 norm in terms of two cross-correlations. Speed is obtained from computing the correlations in the frequency domain using FFT algorithms.}, issn = {0013-5194}, doi = {10.1049/el:20070793}, author = {Essannouni, F. and Thami, R. Oulad Haj and Aboutajdine, D. and Salam, A.} } @conference { ISI:000255983000005, title = {Fuzzy radial basis function neural networks for web applications cost estimation}, booktitle = {2007 INNOVATIONS IN INFORMATION TECHNOLOGIES, VOLS 1 AND 2}, year = {2007}, note = {4th International Conference on Innovations in Information Technology, Dubai, U ARAB EMIRATES, NOV 18-20, 2007}, pages = {21+}, publisher = {IEEE}, organization = {IEEE}, abstract = {The Fuzzy Radial basis function Neural Networks (FRBFN) for software cost estimation is designed by integrating the principles of RBFN and the fuzzy C-means clustering algorithm. The architecture of the network is suitably modified at the hidden layer to realise a novel neural implementation of the fuzzy clustering algorithm. Fuzzy set-theoretic concepts are incorporated at the hidden layer, enabling the model to handle uncertain and imprecise data, which can improve greatly the accuracy of obtained estimates. MMRE and Pred are used as measures of prediction accuracy for this comparative study. The results show that an RBFN using fuzzy C-means performs better than an RBFN using hard C-means. This study uses data on web applications from the Tukutuku database.}, isbn = {978-1-4244-1840-4}, author = {Idri, Ali and Zakrani, Abdelali and Elkoutbi, Mohamed and Abran, Alain} } @conference { ISI:000256344200195, title = {Iterative threshold decoding of one step majority logic decodable block codes}, booktitle = {2007 IEEE INTERNATIONAL SYMPOSIUM ON SIGNAL PROCESSING AND INFORMATION TECHNOLOGY, VOLS 1-3}, year = {2007}, note = {7th IEEE International Symposium on Signal Processing and Information Technology, Cairo, EGYPT, DEC 15-18, 2007}, pages = {1059+}, publisher = {IEEE; IEEE Signal Proc Soc; IEEE Comp Soc}, organization = {IEEE; IEEE Signal Proc Soc; IEEE Comp Soc}, abstract = {In this paper an iterative decoding of one step majority logic decodable (OSMLD) block codes is studied. We use a soft-in soft-out of APP threshold algorithm which is able to decode theses codes nearly as well as belief propagation (BP) algorithm. The Computation time of the proposed algorithm is very low and good performance results are obtained on both AWGN and Rayleigh fading channels. A comparison of the proposed algorithm and BP algorithm is also presented. We succeed to apply EXIT chart technique to our iterative process.}, isbn = {978-1-4244-1834-3}, author = {Lahmer, M. 
and Belkasmi, M. and Ayoub, F.} } @article { ISI:000251549200037, title = {Simple noncircular correlation method for exhaustive sum square difference matching}, journal = {OPTICAL ENGINEERING}, volume = {46}, number = {10}, year = {2007}, month = {OCT}, abstract = {We propose a new algorithm to calculate the sum square difference (SSD) metric for estimating translational motion. Using some substitutions and complex arithmetics, computation of the SSD is derived to be a correlation function of two substituting functions. The former can be computed using the fast Fourier transform (FFT) approach, which is less computationally expensive than the direct computation of the SSD. Zero padding of the block is useful both to avoid the circular character of the FFT and to computes the SSD function at once. We prove the existence of a simple and optimal correlation method for noncircular SSD matching. (c) 2007 Society of Photo-Optical Instrumentation Engineers.}, issn = {0091-3286}, doi = {10.1117/1.2786469}, author = {Essannouni, F. and Thami, R. Oulad Haj and Aboutajdine, D. and Salam, A.} } @conference { ISI:000245384800059, title = {An optimal and statistically robust correlation technique for block based motion estimation}, booktitle = {2006 IEEE INTERNATIONAL CONFERENCE ON MULTIMEDIA AND EXPO - ICME 2006, VOLS 1-5, PROCEEDINGS}, year = {2006}, note = {IEEE International Conference on Multimedia and Expo (ICME 2006), Toronto, CANADA, JUL 09-12, 2006}, pages = {233+}, publisher = {IEEE; IEEE Circuits \& Syst Soc; IEEE Commun Soc; IEEE Comp Soc; IEEE Signal Proc Soc}, organization = {IEEE; IEEE Circuits \& Syst Soc; IEEE Commun Soc; IEEE Comp Soc; IEEE Signal Proc Soc}, abstract = {In this paper we propose an optimal and robust correlation technique for the local motion estimation purposes. It is based on the maximization of a statistical robust matching function, which is computed in the frequency domain and therefore can be implemented by fast transformation algorithms. We show that our method achieves a significant speed up and robustness over the full search block-matching algorithm. We also present a comparative performance analysis, which shows that the proposed method greatly outperforms the state-of-the-art in correlation motion estimation.}, isbn = {978-1-4244-0366-0}, doi = {10.1109/ICME.2006.262425}, author = {Essannouni, F. and Thami, R. Oulad Haj and Salam, A. and Aboutajdine, D.} } @conference { ISI:000245125200117, title = {The use of the annotations in a collaborative activity of reading an online hypermedia course document}, booktitle = {2006 7TH INTERNATIONAL CONFERENCE ON INFORMATION TECHNOLOGY BASED HIGHER EDUCATION AND TRAINING, VOLS 1 AND 2}, year = {2006}, note = {7th International Conference on Information Technology Based Higher Education and Training, Sydney, AUSTRALIA, JUL 10-13, 2006}, pages = {797-802}, publisher = {hp; UNESCO; UTS, IICTG; UTS, IML; CeNTIE; IEEE}, organization = {hp; UNESCO; UTS, IICTG; UTS, IML; CeNTIE; IEEE}, abstract = {In this paper, we deal with collective distance learning and in particular for the use of communication tools in the context of a collaborative activity of {\textquoteleft}{\textquoteleft}reading and comprehension of an online hypermedia course document{{\textquoteright}{\textquoteright}}. general, to create a collaborative situation between learners and/or with the tutor, the majority of the existincy systems for e-learning use primarily the traditional communication tools (email, chat and/or discussion forums). 
These tools constitute an important means for learners to exchange ideas and to validate and enrich their training {[}1]. However, when the exchanges between learners are numerous, learners find it difficult to read everything that was exchanged, to distinguish the most important messages from the less important ones, and to associate a message with the corresponding part of the course. To solve these problems, and in order to encourage and stimulate interaction, we think that associating the messages written by learners or the tutor (comment, question, answer, etc.) with the corresponding parts of the course will facilitate their reading and will motivate learners to use them as a means of validating the course concepts and of collaborating. In this paper, we propose a solution based on a collaborative Web annotation tool called SMARTNotes, which allows all learners as well as the tutor to collaborate on either a part or the whole of the course.}, isbn = {978-1-4244-0405-6}, author = {Bouzidi, Driss and Ajhoun, Rachida and Benkiran, Amine} } @conference { ISI:000231849800003, title = {Towards mining structural workflow patterns}, booktitle = {DATABASE AND EXPERT SYSTEMS APPLICATIONS, PROCEEDINGS}, series = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {3588}, year = {2005}, note = {16th International Workshop on Database and Expert Systems Applications, Copenhagen Business Sch, Copenhagen, DENMARK, AUG 22-26, 2005}, pages = {24-33}, publisher = {Danish Minist Sci, Technol \& Innov; DEXA Assoc; Austrian Comp Soc; Res Inst Appl Knowledge Proc; FAW Software Engn GmbH}, organization = {Danish Minist Sci, Technol \& Innov; DEXA Assoc; Austrian Comp Soc; Res Inst Appl Knowledge Proc; FAW Software Engn GmbH}, abstract = {Collaborative information systems are becoming more and more complex, involving numerous interacting business objects within considerable processes. Analysing the interaction structure of those complex systems will enable them to be well understood and controlled. The work described in this paper is a contribution to these problems for workflow-based process applications. In fact, we discover workflow patterns from traces of workflow events based on a workflow mining technique. Workflow mining proposes techniques to acquire a workflow model from a workflow log. Mining of workflow patterns is done by a statistical analysis of log-based events.
Our approach is characterised by a {\textquoteleft}{\textquoteleft}local{{\textquoteright}{\textquoteright}} workflow pattern discovery that allows partial results to be covered, and by a dynamic technique for dealing with concurrency.}, isbn = {3-540-28566-0}, issn = {0302-9743}, author = {Gaaloul, W and Baina, K and Godart, C}, editor = {Andersen, KV and Debenham, J and Wagner, R} } @conference { ISI:000187617100054, title = {Toward a generation of code multi-target for the VBOOM method}, booktitle = {SERP{\textquoteright}03: PROCEEDINGS OF THE INTERNATIONAL CONFERENCE ON SOFTWARE ENGINEERING RESEARCH AND PRACTICE, VOLS 1 AND 2}, year = {2003}, note = {International Conference on Software Engineering Research and Practice, LAS VEGAS, NV, JUN 23-26, 2003}, pages = {345-350}, publisher = {Comp Sci Res, Educ \& Applicat Press; Int Technol Inst; Korean Soc Internet Informat; World Acad Sci Informat Technol}, organization = {Comp Sci Res, Educ \& Applicat Press; Int Technol Inst; Korean Soc Internet Informat; World Acad Sci Informat Technol}, abstract = {The VBOOM method (View Based Object Oriented Method), developed by our team, is an analysis and design method based on object viewpoints, dedicated to the modeling of complex systems with access rights. However, it only generates VBOOL (View Based Object Oriented Language) source code, this language being an extension of the Eiffel language. In this article, we present a new approach based on the UML (Unified Modeling Language) standard to generate multi-target code with this method. The adopted approach consists of a translation performed by a conversion mechanism based on two concepts of the UML notation: the flexible class and the visibility relation. These two essential notions, which do not exist in UML and which underlie the final VBOOM model, can be produced by an implementation solution targeting several object-oriented languages such as JAVA, C++, etc.}, isbn = {1-932415-21-1}, author = {Sbihi, B and Abdelaziz, K and Ahmed, E and Coulette, B}, editor = {AlAni, B and Arabnia, HR and Mun, Y} } @conference { ISI:000176683300003, title = {Estimating software project effort by analogy based on linguistic values}, booktitle = {EIGHTH IEEE SYMPOSIUM ON SOFTWARE METRICS, PROCEEDINGS}, year = {2002}, note = {8th IEEE Symposium on Software Metrics, OTTAWA, CANADA, JUN 04-07, 2002}, pages = {21-30}, publisher = {IEEE Comp Soc; ALC TEL; CISTEL; Metric Ctr; Carleton Univ; Natl Res Council Canada}, organization = {IEEE Comp Soc; ALC TEL; CISTEL; Metric Ctr; Carleton Univ; Natl Res Council Canada}, abstract = {Estimation models in software engineering are used to predict some important attributes of future entities such as software development effort, software reliability and programmers{\textquoteright} productivity. Among these models, those estimating software effort have motivated considerable research in recent years. The prediction procedure used by these software-effort models can be based on a mathematical function or other techniques such as analogy-based reasoning, neural networks, regression trees, and rule induction models. Estimation by analogy is one of the most attractive techniques in the software effort estimation field. However, the procedure used in estimation by analogy is not yet able to correctly handle linguistic values (categorical data) such as {\textquoteleft}very low{\textquoteright}, {\textquoteleft}low{\textquoteright} and {\textquoteleft}high{\textquoteright}.
In this paper, we propose a new approach based on reasoning by analogy, fuzzy logic and linguistic quantifiers to estimate software project effort when it is described either by numerical or linguistic values; this approach is referred to as Fuzzy Analogy. This paper also presents an empirical validation of our approach based on the COCOMO{\textquoteright}81 dataset.}, isbn = {0-7695-1339-5}, doi = {10.1109/METRIC.2002.1011322}, author = {Idri, A and Abran, A and Khoshgoftaar, TM} } @article { ISI:000180261100004, title = {Investigating soft computing in case-based reasoning for software cost estimation}, journal = {ENGINEERING INTELLIGENT SYSTEMS FOR ELECTRICAL ENGINEERING AND COMMUNICATIONS}, volume = {10}, number = {3}, year = {2002}, month = {SEP}, pages = {147-157}, abstract = {Software cost estimation has been the subject of intensive investigations in the field of software engineering. As a result, numerous software cost estimation techniques have been proposed and investigated. To our knowledge, currently there are no cost estimation techniques that can incorporate and/or tolerate the aspects of imprecision, vagueness, and uncertainty into their predictions. However, software projects are often described by vague information. Furthermore, an estimate is only a probabilistic assessment of a future condition. Consequently, cost estimation models must be able to deal with imprecision and uncertainty, the two principal components of soft computing. To estimate the cost of software projects when they are described by vague and imprecise attributes, in an earlier study we have proposed an innovative approach referred to as Fuzzy Analogy. In this paper, we investigate the uncertainty of cost estimates generated by the Fuzzy Analogy approach. The primary aim is to generate a set of possible values for the actual software development cost. This set can then be used to deduce, for practical purposes, a point estimate for the cost, and for analyzing the risks associated with all possible estimates.}, issn = {0969-1170}, author = {Idri, A and Khoshgoftaar, TM and Abran, A} }