@article {Majdoubi2022, title = {A Decentralized Trust Establishment Protocol for Smart IoT Systems}, journal = {Internet of Things (Netherlands)}, volume = {20}, year = {2022}, note = {cited By 0}, abstract = {The Internet of Things (IoT) is a widely deployed technology to provide innovative services in many several fields such as industry, logistics, healthcare, and energy management. The integration of artificial intelligence, machine learning, edge/cloud computing, 5G networks, big data analytics, and other emerging technologies in IoT systems is leading to the emergence of Smart Systems that support devices to make autonomous decisions in order to improve and facilitate people{\textquoteright}s lives. These smart IoT devices produce a huge amount of sensitive data. Thus, establishing Trust between these devices is vital to ensure the security of the sensed data and for the normal functionality of smart systems. To address these challenges, several Trust establishment protocols have been proposed, but they have some limitations such as centralization, the need for human intervention, and the lack of adaptability to smart devices with computation and storage constraints. In this paper, a decentralized trust establishment protocol is proposed for smart IoT devices and is applicable for different scenarios. The protocol provides decentralized identity management, mutual authentication, access control, and secure communication by combining Blockchain technology with PV-SAS-MCA message cross-authentication protocol based on Short Authenticated Strings (SAS). To the best of our knowledge, this research is the first to propose a new approach that uses Blockchain as an extra distributed and authenticated channel to exchange Short Authenticated Strings without physical interaction. This helps to allow entities in the Smart IoT environment not only to identify each other but also to exchange their public keys in a secure manner. Moreover, the proposed protocol uses Elliptic Curve Cryptography (ECC) to secure communications with significantly smaller key sizes and lower computation overhead. Experimental results reveal that the time required to complete the authentication process is less than 90 ms which is a superior performance when compared to state-of-the-art decentralized authentication mechanisms. {\textcopyright} 2022 Elsevier B.V.}, doi = {10.1016/j.iot.2022.100634}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85141956819\&doi=10.1016\%2fj.iot.2022.100634\&partnerID=40\&md5=09f4ee511269a6c179ffb6176350529b}, author = {Majdoubi, D.E. and Bakkali, H.E. and Bensaih, M. and Sadki, S.} } @article {Nakach2022446, title = {Deep Hybrid AdaBoost Ensembles for Histopathological Breast Cancer Classification}, journal = {Lecture Notes in Networks and Systems}, volume = {468 LNNS}, year = {2022}, note = {cited By 0}, pages = {446-455}, abstract = {Breast cancer (BC) is the most common diagnosed cancer type and one of the top leading causes of death in women worldwide. The early diagnosis of this type of cancer is the main driver of high survival rate. This paper aims to use homogenous ensemble learning and transfer learning for binary classification of BC histological images over the four-magnification factor (MF) values of the BreakHis dataset: 40X, 100X, 200X, and 400X. 
The proposed ensembles are implemented using a hybrid architecture (HA) that combines: (1) three of the most recent deep learning (DL) techniques as feature extractors (FE): DenseNet_201, Inception_V3, and MobileNet_V2, and (2) the boosting method AdaBoost with Decision Tree (DT) as a base learner. The study evaluated and compared: the ensembles designed with the same HA but with different number of trees (50, 100, 150 and 200), the single DT classifiers with the best AdaBoost ensembles and the best AdaBoost ensembles of each FE over each MF. The empirical evaluations used: four classification performance criteria (accuracy, recall, precision and F1-score), 5-fold cross-validation, Scott Knott (SK) statistical test to select the best cluster of the outperforming models, and Borda Count voting system to rank the best performing ones. Results showed the potential of combining DL techniques for FE and AdaBoost boosting method to classify BC in malignant and benign tumors, furthermore the AdaBoost ensemble constructed using 200 trees, DenseNet_201 as FE and MF 200X achieved the best mean accuracy value with 90.36\%. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-04826-5_45}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130279063\&doi=10.1007\%2f978-3-031-04826-5_45\&partnerID=40\&md5=59d8d87203f77eed9f4da56cae519226}, author = {Nakach, F.-Z. and Zerouaoui, H. and Idri, A.} } @article {Zerouaoui2022, title = {Deep hybrid architectures for binary classification of medical breast cancer images}, journal = {Biomedical Signal Processing and Control}, volume = {71}, year = {2022}, note = {cited By 16}, abstract = {The diagnosis of breast cancer in the early stages significantly decreases the mortality rate by allowing the choice of adequate treatment. This study developed and evaluated twenty-eight hybrid architectures combining seven recent deep learning techniques for feature extraction (DenseNet 201, Inception V3, Inception ReseNet V2, MobileNet V2, ResNet 50, VGG16, and VGG19), and four classifiers (MLP, SVM, DT, and KNN) for a binary classification of breast pathological images over the BreakHis and FNAC datasets. The designed architectures were evaluated using: (1) four classification performance criteria (accuracy, precision, recall, and F1-score), (2) Scott Knott (SK) statistical test to cluster the proposed architectures and identify the best cluster of the outperforming architectures, and (3) the Borda Count voting method to rank the best performing architectures. The results showed the potential of combining deep learning techniques for feature extraction and classical classifiers to classify breast cancer in malignant and benign tumors. The hybrid architecture using the MLP classifier and DenseNet 201 for feature extraction (MDEN) was the top performing architecture with higher accuracy values reaching 99\% over the FNAC dataset, 92.61\%, 92\%, 93.93\%, and 91.73\% over the four magnification factor values of the BreakHis dataset: 40X, 100X, 200X, and 400X, respectively. The results of this study recommend the use of hybrid architectures using DenseNet 201 for the feature extraction of the breast cancer histological images because it gave the best results for both datasets BreakHis and FNAC, especially when combined with the MLP classifier. 
{\textcopyright} 2021 Elsevier Ltd}, keywords = {accuracy, algorithm, Article, augmentation index, Binary classification, biopsy technique, Breast Cancer, Breast Cancer Histopathological Image Classification, Classification (of information), Classification algorithm, classifier, Cluster computing, clustering algorithm, Clustering algorithms, colloid carcinoma, Computer aided diagnosis, Computer architecture, construct validity, contrast limited adaptive histogram equalization, Convolutional neural network, Convolutional neural networks, deep hybrid architecture, Deep learning, Deep neural networks, Diseases, ductal carcinoma, external validity, Extraction, F1 score, Feature extraction, Features extraction, feed forward neural network, fibroadenoma, fine needle aspiration biopsy, histogram, Histological images, histology, Hybrid architectures, Image classification, image processing, Images processing, internal validity, learning algorithm, Learning algorithms, Learning techniques, lobular carcinoma, Machine learning, measurement precision, Medical imaging, MLP classifiers, Mortality rate, Network architecture, papillary carcinoma, Pathological images, phyllodes tumor, recall, residual neural network, scoring system, Scott Knott, Support vector machines}, doi = {10.1016/j.bspc.2021.103226}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85125746862\&doi=10.1016\%2fj.bspc.2021.103226\&partnerID=40\&md5=b9b74d0dcb135861bc2e3d820f836efa}, author = {Zerouaoui, H. and Idri, A.} } @article {Lahmar2022, title = {Deep hybrid architectures for diabetic retinopathy classification}, journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization}, year = {2022}, note = {cited By 0}, abstract = {Diabetic retinopathy (DR) is the most severe ocular complication of diabetes. It leads to serious eye complications such as vision impairment and blindness. A computer-aided diagnosis may help in the early detection of this disease, which increases the chances of treating it efficiently. This paper carried out an empirical evaluation of the performances of 28 deep hybrid architectures for an automatic binary classification of the referable diabetic retinopathy, and compared them to seven end-to-end deep learning (DL) architectures. For the hybrid architectures, we combined seven DL techniques for feature extraction (DenseNet201, VGG16, VGG19, MobileNet_V2, Inception_V3, Inception_ResNet_V2 and ResNet50) and four classifiers (SVM, MLP, DT and KNN). For the end-to-end DL architectures, we used the same techniques used for the feature extraction in the hybrid architectures. The architectures were compared in terms of accuracy, sensitivity, precision and F1-score using the Scott Knott test and the Borda count voting method. All the empirical evaluations were over three datasets: APTOS, Kaggle DR and Messidor-2, using a k-fold cross validation method. The results showed the potential of combining deep learning techniques for feature extraction and classical machine learning techniques to classify referable diabetic retinopathy. The hybrid architecture using the SVM classifier and MobileNet_V2 for feature extraction was the top performing architecture and it was classified with the best performing end-to-end architectures in the best clusters of APTOS, Kaggle DR and Messidor-2 datasets with an accuracy equal to 88.80\%, 84.01\% and 84.05\% respectively. 
Note that the two end-to-end architectures DenseNet201 and MobileNet_V2 outperformed all the hybrid architectures over the three datasets. However, we recommend the use of the hybrid architecture designed with SVM and MobileNet_V2 since it is promising and less time consuming, and requires less parameter tuning compared to the end-to-end techniques. {\textcopyright} 2022 Informa UK Limited, trading as Taylor \& Francis Group.}, keywords = {Classification (of information), Computer aided diagnosis, Deep learning, Diabetic retinopathy, Empirical evaluations, End to end, Extraction, Eye protection, Feature extraction, Features extraction, Hybrid architectures, Image classification, Learning algorithms, Learning architectures, Learning techniques, Medical image, Medical imaging, Performance, Support vector machines, Vision impairments}, doi = {10.1080/21681163.2022.2060864}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85129262042\&doi=10.1080\%2f21681163.2022.2060864\&partnerID=40\&md5=35c1d7e5976fbdb00e23b4aa456eadc9}, author = {Lahmar, C. and Idri, A.} } @book {AlAfandy2022127, title = {Deep learning}, series = {Approaches and Applications of Deep Learning in Virtual Medical Care}, year = {2022}, note = {cited By 0}, pages = {127-166}, abstract = {This chapter provides a comprehensive explanation of deep learning including an introduction to ANNs, improving the deep NNs, CNNs, classic networks, and some technical tricks for image classification using deep learning. ANNs, mathematical models for one node ANN, and multi-layers/multi-nodes ANNs are explained followed by the ANNs training algorithm followed by the loss function, the cost function, the activation function with its derivatives, and the back-propagation algorithm. This chapter also outlines the most common training problems with the most common solutions and ANNs improvements. CNNs are explained in this chapter with the convolution filters, pooling filters, stride, padding, and the CNNs mathematical models. This chapter explains the four most commonly used classic networks and ends with some technical tricks that can be used in CNNs model training. {\textcopyright} 2022 by IGI Global.}, doi = {10.4018/978-1-7998-8929-8.ch006}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85139678781\&doi=10.4018\%2f978-1-7998-8929-8.ch006\&partnerID=40\&md5=7ccf9fb39c61262036843028b0c808a6}, author = {Al Afandy, K.A. and Omara, H. and Lazaar, M. and Al Achhab, M.} } @article {ElAlaoui2022435, title = {Deep Stacked Ensemble for Breast Cancer Diagnosis}, journal = {Lecture Notes in Networks and Systems}, volume = {468 LNNS}, year = {2022}, note = {cited By 1}, pages = {435-445}, abstract = {Breast cancer is considered one of the major public health issues and a leading cause of death among women in the world. Its early diagnosis can significantly help to increase the chances of survival rate. Therefore, this study proposes a deep stacking ensemble technique for binary classification of breast histopathological images over the BreakHis dataset. Initially, to form the base learners of the deep stacking ensemble, we trained seven deep learning (DL) techniques based on pre-trained VGG16, VGG19, ResNet50, Inception_V3, Inception_ResNet_V2, Xception, and MobileNet with a 5-fold cross-validation method. Then, a meta-model was built, a logistic regression algorithm that learns how to best combine the predictions of the base learners. 
Furthermore, to evaluate and compare the performance of the proposed technique, we used: (1) four classification performance criteria (accuracy, precision, recall, and F1-score), and (2) Scott Knott (SK) statistical test to cluster and identify the outperforming models. Results showed the potential of the stacked deep learning techniques to classify breast cancer images into malignant or benign tumor. The proposed deep stacking ensemble reports an overall accuracy of 93.8\%, 93.0\%, 93.3\%, and 91.8\% over the four magnification factors (MF) values of the BreakHis dataset: 40X, 100X, 200X and 400X, respectively. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-04826-5_44}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130273664\&doi=10.1007\%2f978-3-031-04826-5_44\&partnerID=40\&md5=4246b5750ce0b8a03e4a80001cdfc5e5}, author = {El Alaoui, O. and Zerouaoui, H. and Idri, A.} } @article {Didi2022427, title = {Design of a Security System Based on Raspberry Pi with Motion Detection}, journal = {Lecture Notes in Networks and Systems}, volume = {455 LNNS}, year = {2022}, note = {cited By 2}, pages = {427-434}, abstract = {Currently, the integration of IoT technology in various fields is very widely used, however, data security remains the essential point to be monitored especially in companies, and also in homes. To control and overcome security-related problems, we adopted Internet of Things technology based on a Raspberry pi4 as the main data processing element in this study. In this paper, we present a simple, efficient, and very reliable study for the monitoring of a video stream coming from a camera installed on a Raspberry pi4 which constitutes the essential element in our project. To reproduce this realization, we did not use a motion sensor, but we took advantage of the algorithm advantages of the Motion software integrated into the free operating system MotionEyeOs on a Raspberry pi4 to trigger motion detection by causing a beep to draw attention. On the other hand, our study was implemented without noticed difficulty, and with a great level of performance and stability which shows that our realization of the Video Stream Surveillance System is successful. {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.}, doi = {10.1007/978-3-031-02447-4_44}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85130364762\&doi=10.1007\%2f978-3-031-02447-4_44\&partnerID=40\&md5=b04857d3a313f1841a7b503bccd21ff8}, author = {Didi, Z. and El Azami, I. and Boumait, E.M.} } @article {Elghomary202236, title = {Design of a Smart MOOC Trust Model: Towards a Dynamic Peer Recommendation to Foster Collaboration and Learner{\textquoteright}s Engagement}, journal = {International Journal of Emerging Technologies in Learning}, volume = {17}, number = {5}, year = {2022}, note = {cited By 2}, pages = {36-56}, abstract = {Recent evolutions in the Internet of Things (IoT) and Social IoT (SIoT) are facilitating collaboration as well as social interactions between entities in various environments, especially Smart Learning Ecosystems (SLEs). However, in these contexts, trust issues become more intense, learners feel suspicious and avoid collaborating with their peers, leading to their demotivation and disengagement. Hence, a Trust Management System (TMS) has become a crucial challenge to promote qualified collaboration and stimulate learners{\textquoteright} engagement. 
In the literature, several trust models have been proposed in various domains, but few address trust issues in SLEs, especially in MOOCs; moreover, these models exclusively rank the best nodes and fail to detect the untrustworthy ones. Therefore, in this paper, we propose a Machine Learning-based trust evaluation model that considers social and dynamic trust parameters to quantify entities{\textquoteright} behaviors. It can distinguish trustworthy and untrustworthy behaviors in MOOCs to recommend benign peers while blocking malicious ones, in order to build a dynamic trust-based peer recommendation in a future phase. Our model prevents learners from wasting their time in unprofitable interactions, protects them from malicious actions, and boosts their engagement. A simulation experiment using real-world SIoT datasets yields encouraging results that demonstrate the performance of our trust model. {\textcopyright} 2022, International Journal of Emerging Technologies in Learning. All Rights Reserved.}, keywords = {Curricula, e-learning, Internet of things, Learning ecosystems, Machine learning, Massive open online course, Peer recommendation, Security of data, Smart education, Social internet of thing, Trust management system, Trust management systems, Trust models}, doi = {10.3991/ijet.v17i05.27705}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85127011761\&doi=10.3991\%2fijet.v17i05.27705\&partnerID=40\&md5=5d062bc0485ddb7b7d27830779b55330}, author = {Elghomary, K. and Bouzidi, D. and Daoudi, N.} } @article {Ettazi2022117, title = {Design patterns for building context-aware transactional services in PaaS-enabled systems}, journal = {International Journal of Web Engineering and Technology}, volume = {17}, number = {2}, year = {2022}, note = {cited By 0}, pages = {117-143}, abstract = {Pervasive computing is characterised by a key feature that affects the operating environment of services and users. It places more emphasis on dynamic environments where available resources continuously vary without prior knowledge of their availability, while in static environments the services provided to users are determined in advance. At the same time, the cloud computing paradigm introduced flexibility of use according to the user{\textquoteright}s profile and needs. In this paper, we aimed to provide context-aware transactional service (CATS) applications with solutions so that they can be integrated and invoked like any service in the digital ecosystem. The motivation in this paper is to offer design patterns that will guarantee short reaction times and minimal human intervention. Our proposal includes a cloud service model by developing a PaaS service that allows CATS adaptation. A new specification for the validation of the CATS model has also been introduced using the ACTA formalism. Copyright {\textcopyright} 2022 Inderscience Enterprises Ltd.}, keywords = {ACTA, Cloud service models, Context- awareness, Context-Aware, Design Patterns, Distributed database systems, Key feature, ON dynamics, Operating environment, Paa, Platform as a Service (PaaS), Transactional service, Ubiquitous computing}, doi = {10.1504/IJWET.2022.125651}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85140992267\&doi=10.1504\%2fIJWET.2022.125651\&partnerID=40\&md5=60e1ee1cdbc8c6f1ce2d9b93a7d6fa0f}, author = {Ettazi, W. and Hafiddi, H.
and Nassar, M.} } @conference {Gaidi2022205, title = {DIGITAL TRANSFORMATION: COMPLEXITY AND NEED FOR A FRAMEWORK FOR ITS MANAGEMENT}, booktitle = {Proceedings of the 15th IADIS International Conference Information Systems 2022, IS 2022}, year = {2022}, note = {cited By 0}, pages = {205-212}, abstract = {Digital Transformation has become an imperative for most organizations. It is a journey that has been induced by the accelerated pace of technological developments (Data, Cloud, IA, ML, RPA, Agility, {\textellipsis}). The integration of these technologies in almost every industry has fundamentally changed the operational processes of companies. It impacts business levels, starting with business models and ending with the customer journey. The adoption of these technologies has also offered an opportunity for these companies to grow, increase agility, strengthen governance models by eliminating traditional silos and ensure better knowledge and proximity to the customer. This theme, "Digital transformation", has been very widely covered in recent years, it has aroused great interest in research both in academia and in practice and has been the subject of several studies. This is a very complex subject and many question marks remain, particularly on the definition of digital transformation, we have identified more than 100 definitions spread over time and between sectors of activity. To approach it properly, it is essential to define or describe the Digital Transformation well first. The main objective of this article is to give a general review of the systematic literature on Digital Transformation, to bring together the different definitions given and to propose a metamodel to explain this notion and show the interactions between the different concepts concerned by this concept. Through this metamodel, we will show the complexity of this transformation and the need to use a Framework for the management of the Digital Transformation. {\textcopyright} 2022 CURRAN-CONFERENCE. All rights reserved.}, keywords = {Business models, Data clouds, Digital transformation, Digital transformation definition, Governance models, IT impact, Meta model, Operational process, Systematic literature review, Technological development}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85137664846\&partnerID=40\&md5=6a4afd6779e72d758df5d934070562c9}, author = {Gaidi, O. and Baina, S.} } @article {Elhachmi2022207, title = {Distributed reinforcement learning for dynamic spectrum allocation in cognitive radio-based internet of things}, journal = {IET Networks}, volume = {11}, number = {6}, year = {2022}, note = {cited By 0}, pages = {207-220}, abstract = {Cognitive Radio (CR) with other advancements such as the Internet of things and machine learning has recently emerged as the main involved technique to use spectrum in an efficient manner. It can access the spectrum in a fully dynamic way and exploit the unused spectrum resources without creating any harm to cognitive users. In this paper, the authors develop a CR access strategy founded on the implementation of an efficient Deep Multi-user Reinforcement Learning algorithm based on a combination of a Deep neural network, Q-learning, and cooperative multi-agent systems. The proposed approach consists of two stages: the user choice algorithm to set up an agent{\textquoteright}s activation order, and the frequency choice method to select the optimal channel on the appropriate bandwidth. 
Reasonable implementation is proposed, and the obtained results demonstrate that the authors{\textquoteright} approach can improve wireless communication for all CR terminals. It shows satisfactory performances in terms of user satisfaction degree and the number of used channels and can keep the channel allocation plan always in the appropriate state. {\textcopyright} 2022 The Authors. IET Networks published by John Wiley \& Sons Ltd on behalf of The Institution of Engineering and Technology.}, keywords = {Cognitive radio, Deep learning, Deep neural networks, Dynamic spectrum allocations, Intelligent agents, Internet of things, Learning algorithms, Machine-learning, Multi agent systems, Multiusers, Q-learning, Radio access, Reinforcement learning, Reinforcement learning algorithms, Reinforcement learnings, Spectra{\textquoteright}s, Spectrum allocation}, doi = {10.1049/ntw2.12051}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85136563113\&doi=10.1049\%2fntw2.12051\&partnerID=40\&md5=af41f0c1a8d989dead96990172cd073c}, author = {Elhachmi, J.} } @conference {Ajallouda2022, title = {Doc2Vec, SBERT, InferSent, and USE Which embedding technique for noun phrases?}, booktitle = {2022 2nd International Conference on Innovative Research in Applied Science, Engineering and Technology, IRASET 2022}, year = {2022}, note = {cited By 4}, abstract = {Phrase embedding is a technique of representing phrases in vector space. A very high effort has been made to develop this technique to improve tasking in various natural language processing (NLP) applications. The evaluation of phrase embedding has been presented in many studies, but most of them focused on the intrinsic or extrinsic evaluation process regardless of the type of the phrase (noun phrases, Verb phrases...). In the literature, there is no study evaluating the embedding of noun phrases, knowing that this type is used by many NLP applications, such as automatic key-phrase extraction (AKE), information retrieval, and question answering. In this article, we will present an empirical study to compare the most common phrase embedding techniques, to determine the most suitable for representing noun phrases. Dataset used in the comparison process consists of the noun phrases from the Inspec and SemEval2010 datasets, to which we have added their manually defined synonyms. {\textcopyright} 2022 IEEE.}, keywords = {Embedding technique, Embeddings, Empirical studies, Key-phrases extractions, Natural language processing applications, Natural language processing systems, Noun phrase, Phrase embedding technique, Question Answering, Vector spaces}, doi = {10.1109/IRASET52964.2022.9738300}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85127985676\&doi=10.1109\%2fIRASET52964.2022.9738300\&partnerID=40\&md5=f274393a28334ba6d338a5dc4e3b5f79}, author = {Ajallouda, L. and Najmani, K. and Zellou, A. and Benlahmar, E.H.} } @conference {Ettahiri2022714, title = {Dynamic Enterprise Architecture planning using case-based reasoning and blockchain}, booktitle = {Procedia Computer Science}, volume = {204}, year = {2022}, note = {cited By 0}, pages = {714-721}, abstract = {Nowadays, Enterprises are aware that the only existing certainty, is a future of great uncertainty (Allen Paulos). Therefore, the potential drivers of change should be detected and the solutions or path to solutions should be prepared in advance. In our work we adopt, the concept of Enterprise architecture, as the tool ensuring the alignment strategy//business//IT. 
So, to implement any change, we propose using case-based reasoning (CBR) to store a set of problems with their solutions, and then to use algorithms of similarity and adaptation to propose the most adapted solution to the problem. Our goal in this paper is to maximize experiences and enrich the knowledge base so that the reasoning mechanism and similarity process are as reliable as possible. Thus, we suggest a sharing of knowledge bases between allied or contracted companies or subsidiaries (for example, a national strategy to be implemented regionally on a case-by-case basis). To ensure reliable sharing, we suggest the use of blockchain technology. {\textcopyright} 2022 Elsevier B.V. All rights reserved.}, keywords = {Block-chain, Blockchain, Business-IT, Case based reasoning, Casebased reasonings (CBR), Dynamic aspects, EA planning, enterprise architecture, Enterprise architecture planning, Enterprise IS, Knowledge based systems, Memory architecture, Nearest-neighbor algorithms, Uncertainty}, doi = {10.1016/j.procs.2022.08.086}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85142888032\&doi=10.1016\%2fj.procs.2022.08.086\&partnerID=40\&md5=86f11d0e65286d500617f34193f7c127}, author = {Ettahiri, I. and Doumi, K.} } @article {Filali20222174, title = {Dynamic SDN-Based Radio Access Network Slicing With Deep Reinforcement Learning for URLLC and eMBB Services}, journal = {IEEE Transactions on Network Science and Engineering}, volume = {9}, number = {4}, year = {2022}, note = {cited By 3}, pages = {2174-2187}, abstract = {Radio access network (RAN) slicing is a key technology that enables the 5G network to support heterogeneous requirements of generic services, namely ultra-reliable low-latency communication (URLLC) and enhanced mobile broadband (eMBB). In this paper, we propose a two-time-scale RAN slicing mechanism to optimize the performance of URLLC and eMBB services. In the large time-scale, an SDN controller allocates radio resources to gNodeBs according to the requirements of the eMBB and URLLC services. In the short time-scale, each gNodeB allocates its available resources to its end-users and requests, if needed, additional resources from adjacent gNodeBs. We formulate this problem as a non-linear binary program and prove its NP-hardness. Next, for each time-scale, we model the problem as a Markov decision process (MDP), where the large time-scale is modeled as a single-agent MDP whereas the shorter time-scale is modeled as a multi-agent MDP. We leverage the exponential-weight algorithm for exploration and exploitation (EXP3) to solve the single-agent MDP of the large time-scale and the multi-agent deep Q-learning (DQL) algorithm to solve the multi-agent MDP of the short time-scale resource allocation. Extensive simulations show that our approach is efficient under different network parameter configurations and that it outperforms recent benchmark solutions.
{\textcopyright} 2013 IEEE.}, keywords = {5G mobile communication systems, Deep learning, EMBB, Heuristic algorithms, Heuristics algorithm, Learning algorithms, Low-latency communication, Markov processes, Multi agent systems, Network slicing, Optimisations, Optimization, Quality of service, Quality-of-service, Radio access networks, Reinforcement learning, Resource allocation, Resource Management, Software agents, Software defined networking, Software-defined networkings, Time measurement, Ultra reliable low latency communication, URLLC}, doi = {10.1109/TNSE.2022.3157274}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85126273319\&doi=10.1109\%2fTNSE.2022.3157274\&partnerID=40\&md5=44d6a41c3d7b8b6d8c4d6918856f36c5}, author = {Filali, A. and Mlika, Z. and Cherkaoui, S. and Kobbane, A.} } @conference {Berahhou2022, title = {Dynamic Vehicle Routing Problem with Simultaneous Delivery and Pickup, Overtime and Inventory Restriction: Golden Ball Algorithm}, booktitle = {2022 IEEE 6th International Conference on Logistics Operations Management, GOL 2022}, year = {2022}, note = {cited By 0}, abstract = {This article studies an important and special variant of the VRP, the Dynamic Vehicle Routing Problem with Simultaneous Delivery and Pickup, Overtime and Inventory Restriction (DVRPSDP-OT-IR), and proposes an improved Golden Ball Algorithm (GBA) based on soccer concepts to solve the problem. A literature review of the GBA is given. A mathematical model has been proposed for the problem and validated with the Cplex solver. A detailed description of the steps of the GBA is presented. {\textcopyright} 2022 IEEE.}, keywords = {DVRP, Dynamic vehicle routing problems, Golden ball algorithm, Inventory restriction, Literature reviews, Overtime, Pickups, Routing algorithms, Vehicle routing, VRPSDP}, doi = {10.1109/GOL53975.2022.9820268}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85135211662\&doi=10.1109\%2fGOL53975.2022.9820268\&partnerID=40\&md5=bee6326e667ddaeea4fde243d4e97f4a}, author = {Berahhou, A. and Benadada, Y.} } @article {Iraqi2021506, title = {Data Analytics in Investment Banks}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {12}, number = {5}, year = {2021}, note = {cited By 0}, pages = {506-513}, abstract = {Capital Markets are one of the most important pillars of the worldwide economy. They gather skilled finance and IT professionals as well as economists in order to take the best investment decisions and choose the most suitable funding solutions every time. Data analytics projects in Capital Markets can definitely be very beneficial, as all optimizations and innovations would have a financial impact, but they can also be very challenging, as the field itself has always incorporated a research component; thus, finding out what could really be of added value might be a tricky task. Based on a comprehensive literature review, this paper aims to structure the thoughts around data analytics in investment banks, and puts forward a classification of relevant data analytics use cases. Lastly, it also discusses how transforming to a data-driven enterprise is the real change investment banks should aim to achieve, and discusses some of the challenges that they might encounter when engaging in this transformation process. {\textcopyright} 2021.
All Rights Reserved.}, keywords = {Commerce, Data analytic use case, Data Analytics, Data driven, Data-driven transformation, Financial impacts, Financial markets, Investment bank, Investment decisions, Investments, IT professional, Metadata, Optimisations}, doi = {10.14569/IJACSA.2021.0120562}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85107532071\&doi=10.14569\%2fIJACSA.2021.0120562\&partnerID=40\&md5=7237f6b38c7eedbe1f23bb2968802d60}, author = {Iraqi, B. and Benhiba, L. and Idrissi, M.A.J.} } @article {Guermah2021110, title = {Dealing with context awareness for service-oriented systems: An ontology-based approach}, journal = {International Journal of Service Science, Management, Engineering, and Technology}, volume = {12}, number = {4}, year = {2021}, note = {cited By 1}, pages = {110-131}, abstract = {In recent years, rapid advances in the enabling technologies for mobile and ubiquitous computing, software paradigms, embedded sensor technologies, and wide range of wired and wireless protocols have been witnessed. Specifically, context-aware services-oriented applications are emerging as the next computing paradigm in which infrastructure and services are seamlessly available. Contextawareness, being an important ingredient, plays a vital role in enabling such interactive smart environments. More recently, the increasing popularity of ontologies has led to new ontology-based models of context because of their potential to support sophisticated ontology-based reasoning methods. This paper presents an architecture for the development of context-aware services based on ontologies. The authors highlight the context metamodel and discuss about reasoning process. This research also presents the semantic approach for service adaptation in context aware environment. Copyright {\textcopyright} 2021, IGI Global.}, doi = {10.4018/IJSSMET.2021070107}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85108948717\&doi=10.4018\%2fIJSSMET.2021070107\&partnerID=40\&md5=57712a52da5717426a70263e22f08eea}, author = {Guermah, H. and Guermah, B. and Fissaa, T. and Hafiddi, H. and Nassar, M.} } @article {Benbriqa2021588, title = {Deep and Ensemble Learning Based Land Use and Land Cover Classification}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {12951 LNCS}, year = {2021}, note = {cited By 1}, pages = {588-604}, abstract = {Monitoring of Land use and Land cover (LULC) changes is a highly encumbering task for humans. Therefore, machine learning based classification systems can help to deal with this challenge. In this context, this study evaluates and compares the performance of two Single Learning (SL) techniques and one Ensemble Learning (EL) technique. All the empirical evaluations were over the open source LULC dataset proposed by the German Center for Artificial Intelligence (EuroSAT), and used the performance criteria -accuracy, precision, recall, F1 score and change in accuracy for the EL classifiers-. We firstly evaluate the performance of SL techniques: Building and optimizing a Convolutional Neural Network architecture, implementing Transfer learning, and training Machine learning algorithms on visual features extracted by Deep Feature Extractors. Second, we assess EL techniques and compare them with SL classifiers. Finally, we compare the capability of EL and hyperparameter tuning to improve the performance of the Deep Learning models we built. 
These experiments showed that Transfer learning is the SL technique that achieves the highest accuracy and that EL can indeed outperform the SL classifiers. {\textcopyright} 2021, Springer Nature Switzerland AG.}, keywords = {Classification (of information), Convolutional neural networks, Deep feature extraction, Deep learning, Ensemble learning, Features extraction, Hyper-parameter optimizations, Land cover, Land use, Learning algorithms, Learning classifiers, Learning techniques, Network architecture, Performance, Transfer learning}, doi = {10.1007/978-3-030-86970-0_41}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85115689890\&doi=10.1007\%2f978-3-030-86970-0_41\&partnerID=40\&md5=910871e0f58b4f00460e5e2509929a23}, author = {Benbriqa, H. and Abnane, I. and Idri, A. and Tabiti, K.} } @conference {Abouaomar2021273, title = {A deep reinforcement learning approach for service migration in MEC-enabled vehicular networks}, booktitle = {Proceedings - Conference on Local Computer Networks, LCN}, volume = {2021-October}, year = {2021}, note = {cited By 4}, pages = {273-280}, abstract = {Multi-access edge computing (MEC) is a key enabler to reduce the latency of vehicular network. Due to the vehicles mobility, their requested services (e.g., infotainment services) should frequently be migrated across different MEC servers to guarantee their stringent quality of service requirements. In this paper, we study the problem of service migration in a MEC-enabled vehicular network in order to minimize the total service latency and migration cost. This problem is formulated as a nonlinear integer program and is linearized to help obtaining the optimal solution using off-the-shelf solvers. Then, to obtain an efficient solution, it is modeled as a multi-agent Markov decision process and solved by leveraging deep Q learning (DQL) algorithm. The proposed DQL scheme performs a proactive services migration while ensuring their continuity under high mobility constraints. Finally, simulations results show that the proposed DQL scheme achieves close-to-optimal performance. {\textcopyright} 2021 IEEE.}, keywords = {Deep learning, Edge computing, Infotainment, Integer programming, Learning schemes, Markov processes, Multi agent systems, Multi-access edge computing, Multiaccess, Q-learning, Quality of service, Reinforcement learning, Reinforcement learning approach, Service migration, Vehicle mobility, Vehicular networks}, doi = {10.1109/LCN52139.2021.9524882}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85118466383\&doi=10.1109\%2fLCN52139.2021.9524882\&partnerID=40\&md5=8e35dfee04f347da3f757f57f5d575c8}, author = {Abouaomar, A. and Mlika, Z. and Filali, A. and Cherkaoui, S. and Kobbane, A.} } @conference {Mrabet2021, title = {Dependable Decentralized Reputation Management System for Vehicular Ad Hoc Networks}, booktitle = {Proceedings - 4th International Conference on Advanced Communication Technologies and Networking, CommNet 2021}, year = {2021}, note = {cited By 0}, abstract = {Reputation management systems are essential for establishing trust among network users. They are tools for reinforcing cooperation and sanctioning malicious behavior. Their importance becomes a requirement in highly mobile and volatile environments, such as vehicular ad-hoc networks (VANET). In the present work, we propose a dynamic and decentralized reputation system that fits VANET characteristics like dynamism and volatility without conceding on security. 
The novel system uses blockchain for reputation information aggregation and storage and secure multiparty computation (SMC) to achieve privacy. Unlike previous VANET reputation systems, our system does not rely on a central authority to evaluate trustworthiness. Instead, it limits its role to maintaining the blockchain and system integrity leaving reputation evaluation to peers. It preserves feedback privacy in the presence of up to n - 2 dishonest parties and shows good performance. Remarkably, our system takes advantage of the infrastructure of VANET when vehicles are nearby while remaining fully functional in extra-urban areas. {\textcopyright} 2021 IEEE.}, keywords = {Block-chain, Blockchain, Cryptography, Decentralised, Management systems, Network security, Privacy, reputation, Reputation management, Secure multi-party computation, Security, Trust, Vehicular ad hoc networks, Vehicular Adhoc Networks (VANETs)}, doi = {10.1109/CommNet52204.2021.9641962}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85124029681\&doi=10.1109\%2fCommNet52204.2021.9641962\&partnerID=40\&md5=2f6bc5d0b3cdbf224ab74c6a02d48c13}, author = {Mrabet, K. and El Bouanani, F. and Ben-Azza, H.} } @article {Houmz2021, title = {Detecting the impact of software vulnerability on attacks: A case study of network telescope scans}, journal = {Journal of Network and Computer Applications}, volume = {195}, year = {2021}, note = {cited By 1}, abstract = {Network scanning is one of the first steps in gathering information about a target before launching attacks. It is used to scan for vulnerable devices and exposed services in order to exploit them. Such exploits can result in data breaches or network disruption, which can be very costly for organizations. There are many factors, including technical and non-technical, affecting the volume of scanning activities. In this paper, we study the impact of vulnerability disclosure on the volume of scans over time and propose a machine learning-based approach to predict this impact. We conducted a comprehensive data collection of network scans from two network telescopes hosted in different countries, as well as the disclosed vulnerabilities from 2014 to 2019. We then designed a set of features to characterize the disclosed vulnerabilities and used several classifiers to predict whether a vulnerability will impact the volume of daily scans. The resulting classifier achieves over 85\% accuracy in predicting the impact. In addition, we performed an analysis of the key characteristics of vulnerabilities that directly affect scanning activities. Our findings show that this approach is able to classify vulnerabilities that have an impact on network scans. The implementation of our model and validation tests proved the efficiency of the selected features, as well as the robustness of our model to classify vulnerabilities{\textquoteright} impact on scans. {\textcopyright} 2021 Elsevier Ltd}, keywords = {Case-studies, Classification algorithm, CVE, Forecasting, Machine learning, Network scan, Network scanning, Network security, Network telescopes, NVD, OR-networks, Scanning, Software vulnerabilities, Telescopes, Times series}, doi = {10.1016/j.jnca.2021.103230}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85116335984\&doi=10.1016\%2fj.jnca.2021.103230\&partnerID=40\&md5=1d7f8d5d4111761b27fc93badc70f925}, author = {Houmz, A. and Mezzour, G. and Zkik, K. and Ghogho, M. 
and Benbrahim, H.} } @conference {Bayram202181, title = {Determining a Person{\textquoteright}s Suicide Risk by Voting on the Short-Term History of Tweets for the CLPsych 2021 Shared Task}, booktitle = {Computational Linguistics and Clinical Psychology: Improving Access, CLPsych 2021 - Proceedings of the 7th Workshop, in conjunction with NAACL 2021}, year = {2021}, note = {cited By 8}, pages = {81-86}, abstract = {In this shared task, we accept the challenge of constructing models to identify Twitter users who attempted suicide based on their tweets 30 and 182 days before the adverse event{\textquoteright}s occurrence. We explore multiple machine learning and deep learning methods to identify a person{\textquoteright}s suicide risk based on the short-term history of their tweets. Taking the real-life applicability of the model into account, we make the design choice of classifying on the tweet level. By voting the tweet-level suicide risk scores through an ensemble of classifiers, we predict the suicidal users 30-days before the event with an 81.8\% true-positives rate. Meanwhile, the tweet-level voting falls short on the six-month-long data as the number of tweets with weak suicidal ideation levels weakens the overall suicidal signals in the long term. {\textcopyright}2021 Association for Computational Linguistics.}, keywords = {Adverse events, Constructing models, Deep learning, Ensemble of classifiers, Learning methods, Risk score, Risk-based, Short-term history, Suicidal ideation, True positive rates}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111364845\&partnerID=40\&md5=a90a61db35a9b8d92bdaa76eb1e44f49}, author = {Bayram, U. and Benhiba, L.} } @article {Daosabah2021324, title = {Dynamic composition of services: an approach driven by the user{\textquoteright}s intention and context}, journal = {International Journal of Web Engineering and Technology}, volume = {16}, number = {4}, year = {2021}, note = {cited By 1}, pages = {324-354}, abstract = {With the continuous development of services in ubiquitous systems, service orientation is becoming increasingly important in its structuring. As a result, the design and the development of applications are gradually migrating from a traditional model to a service-oriented model. In this regard, we propose through this work an approach for web service (WS) composition guided by the context and the intention, by which we suggest an architecture for service composition that reduces the complexity of the generated artificial intelligence (AI) planning problem and to ensure the interoperability of any system independently of the domain planners. The general idea of this approach is to conceive an intentional-contextual metamodel that will be transformed into an OWL model using OMG standards, which will be used to map the WS composition problem into AI planning problems. This article describes the architectural, conceptual, and strategic aspects to deal with the WS composition problem. {\textcopyright} 2021 Inderscience Enterprises Ltd.}, keywords = {Artificial intelligence, Birds, Context- awareness, Intention, interoperability, Object management, Object management group, OMG, Ontology{\textquoteright}s, OWL-S, Quality of service, Services composition, Spring cloud framework, Web service composition, Web services}, doi = {10.1504/IJWET.2021.122768}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85124011575\&doi=10.1504\%2fIJWET.2021.122768\&partnerID=40\&md5=f9387d7a26ae641892da27f98947542d}, author = {Daosabah, A. 
and Guermah, H. and Nassar, M.} } @article {Benhar2020, title = {Data preprocessing for heart disease classification: A systematic literature review.}, journal = {Computer Methods and Programs in Biomedicine}, volume = {195}, year = {2020}, note = {cited By 25}, abstract = {Context: Early detection of heart disease is an important challenge since 17.3 million people yearly lose their lives due to heart diseases. Besides, any error in diagnosis of cardiac disease can be dangerous and risks an individual{\textquoteright}s life. Accurate diagnosis is therefore critical in cardiology. Data Mining (DM) classification techniques have been used to diagnosis heart diseases but still limited by some challenges of data quality such as inconsistencies, noise, missing data, outliers, high dimensionality and imbalanced data.
Data preprocessing (DP) techniques were therefore used to prepare data with the goal of improving the performance of heart disease DM based prediction systems. Objective: The purpose of this study is to review and summarize the current evidence on the use of preprocessing techniques in heart disease classification as regards: (1) the DP tasks and techniques most frequently used, (2) the impact of DP tasks and techniques on the performance of classification in cardiology, (3) the overall performance of classifiers when using DP techniques, and (4) comparisons of different combinations classifier-preprocessing in terms of accuracy rate. Method: A systematic literature review is carried out, by identifying and analyzing empirical studies on the application of data preprocessing in heart disease classification published in the period between January 2000 and June 2019. A total of 49 studies were therefore selected and analyzed according to the aforementioned criteria. Results: The review results show that data reduction is the most used preprocessing task in cardiology, followed by data cleaning. In general, preprocessing either maintained or improved the performance of heart disease classifiers. Some combinations such as (ANN + PCA), (ANN + CHI) and (SVM + PCA) are promising terms of accuracy. However the deployment of these models in real-world diagnosis decision support systems is subject to several risks and limitations due to the lack of interpretation. {\textcopyright} 2020 Elsevier B.V.}, keywords = {Cardiology, Classification (of information), Classification technique, classifier, clinical practice, clinical research, Computer aided diagnosis, data classification, Data mining, Data preprocessing, data processing, Decision support systems, Deep learning, Diagnosis decision, diagnostic accuracy, disease classification, Diseases, empiricism, evidence based practice, feature selection, Heart, heart disease, Heart Diseases, High dimensionality, human, Humans, intermethod comparison, Machine learning, Performance of classifier, prediction, Prediction systems, Preprocessing techniques, publication, Review, Support vector machines, Systematic literature review, Systematic Review, task performance}, doi = {10.1016/j.cmpb.2020.105635}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85087500300\&doi=10.1016\%2fj.cmpb.2020.105635\&partnerID=40\&md5=cae53ce36903d5d8b817ec96deb39b45}, author = {Benhar, H. and Idri, A. and L Fern{\'a}ndez-Alem{\'a}n, J.} } @article {Chlioui2020547, title = {Data preprocessing in knowledge discovery in breast cancer: systematic mapping study}, journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization}, volume = {8}, number = {5}, year = {2020}, note = {cited By 5}, pages = {547-561}, abstract = {Data Mining (DM) is a set of techniques that allow to analyse data from different perspectives and summarising it into useful information. Data mining has been increasingly used in medicine, especially in oncology. Data preprocessing is the most important step of knowledge extraction process and allows to improve the performance of the DM models. Breast cancer (BC) becomes the most common cancer among females worldwide and the leading cause of women{\textquoteright}s death. 
This paper aims to perform a systematic mapping study to analyse and synthesise studies on the application of preprocessing techniques for a DM task in breast cancer. Therefore, 66 relevant articles published between 2000 and October 2018 were selected and analysed according to five criteria: year/channel of publication, research type, medical task, empirical type and preprocessing task. The results show that conferences and journals are the most targeted publication sources, researchers were more interested in applying preprocessing techniques for the diagnosis of BC, historical-based evaluation was the most used empirical type in the evaluation of preprocessing techniques in BC, and data reduction was the most investigated preprocessing task in BC. However, a low number of papers discussed treatment, which encourages researchers to devote more effort to this task. {\textcopyright} 2020 Informa UK Limited, trading as Taylor \& Francis Group.}, keywords = {algorithm, Article, Breast Cancer, cancer classification, cancer prognosis, clinical assessment, clinical outcome, Data mining, Data mining models, Data mining tasks, Data preprocessing, Diagnosis, diagnostic accuracy, Diseases, Extraction process, health promotion, human, image analysis, knowledge, knowledge discovery, Knowledge extraction, Machine learning, Mapping, Medical informatics, nerve cell network, neural crest cell, Performance, Pre-processing techniques, processing, screening test, Systematic mapping studies, Systematic Review, validity}, doi = {10.1080/21681163.2020.1730974}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85080112312\&doi=10.1080\%2f21681163.2020.1730974\&partnerID=40\&md5=befb1bc3f31f676a8e95bbc5bff5ab6d}, author = {Chlioui, I. and Idri, A. and Abnane, I.} } @conference {Gogolou20201857, title = {Data Series Progressive Similarity Search with Probabilistic Quality Guarantees}, booktitle = {Proceedings of the ACM SIGMOD International Conference on Management of Data}, year = {2020}, note = {cited By 18}, pages = {1857-1873}, abstract = {Existing systems dealing with the increasing volume of data series cannot guarantee interactive response times, even for fundamental tasks such as similarity search. Therefore, it is necessary to develop analytic approaches that support exploration and decision making by providing progressive results, before the final and exact ones have been computed. Prior works lack both efficiency and accuracy when applied to large-scale data series collections. We present and experimentally evaluate a new probabilistic learning-based method that provides quality guarantees for progressive Nearest Neighbor (NN) query answering. We provide both initial and progressive estimates of the final answer that get better during the similarity search, as well as suitable stopping criteria for the progressive queries. Experiments with synthetic and diverse real datasets demonstrate that our prediction methods constitute the first practical solution to the problem, significantly outperforming competing approaches.
{\textcopyright} 2020 Association for Computing Machinery.}, keywords = {Analytic approach, Decision making, Nearest neighbors, Practical solutions, Prediction methods, Probabilistic Learning, Progressive queries, Query processing, Search engines, Similarity search, Stopping criteria}, doi = {10.1145/3318464.3389751}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85084509188\&doi=10.1145\%2f3318464.3389751\&partnerID=40\&md5=71d788a1cdf706d6ae4f9dfa38089a7f}, author = {Gogolou, A. and Tsandilas, T. and Echihabi, K. and Bezerianos, A. and Palpanas, T.} } @article {LHAZMIR2020102102, title = {A decision-making analysis in UAV-enabled wireless power transfer for IoT networks}, journal = {Simulation Modelling Practice and Theory}, volume = {103}, year = {2020}, pages = {102102}, abstract = {We consider an IoT network with energy-harvesting capabilities. To extend the network lifetime, we propose a novel unmanned aerial vehicle (UAV)- enabled wireless power transfer (WPT) system, where UAVs move among IoT devices and act as data aggregators and wireless power providers. This paper addresses the decision-making problem since the limited buffer and energy resources constrain all nodes. Each IoT node must decide on whether to request a data transmission, to ask for a wireless energy transfer or to abstain and not take any action. When a UAV receives a request from an IoT device, either for data reception or wireless energy transmission, it has to accept or decline. In this paper, we aim to find a proper packet delivery and energy transfer policy according to the system state that maximizes the data transmission efficiency of the system. We first formulate the problem as a Markov Decision Process (MDP) to tackle the successive decision issues, to optimize a utility for each node upon a casual environment. As the MDP formalism achieves its limits when the interactions between different nodes are considered, we formulate the problem as a Graph-based MDP (GMDP). The transition functions and rewards are then decomposed into local functions, and a graph illustrates the dependency{\textquoteright} relations among the nodes. To obtain the optimal policy despite the system{\textquoteright}s variations, Mean-Field Approximation (MFA) and Approximate linear-programming (ALP) algorithms were proposed to solve the GMDP problem.}, keywords = {GMDP, Internet of things, Unmanned aerial vehicle, Wireless energy transfer}, issn = {1569-190X}, doi = {https://doi.org/10.1016/j.simpat.2020.102102}, url = {https://www.sciencedirect.com/science/article/pii/S1569190X2030040X}, author = {Safae Lhazmir and Omar Ait Oualhaj and Abdellatif Kobbane and Lynda Mokdad} } @article {Lhazmir2020, title = {A decision-making analysis in UAV-enabled wireless power transfer for IoT networks}, journal = {Simulation Modelling Practice and Theory}, volume = {103}, year = {2020}, note = {cited By 8}, abstract = {We consider an IoT network with energy-harvesting capabilities. To extend the network lifetime, we propose a novel unmanned aerial vehicle (UAV)- enabled wireless power transfer (WPT) system, where UAVs move among IoT devices and act as data aggregators and wireless power providers. This paper addresses the decision-making problem since the limited buffer and energy resources constrain all nodes. Each IoT node must decide on whether to request a data transmission, to ask for a wireless energy transfer or to abstain and not take any action. 
When a UAV receives a request from an IoT device, either for data reception or wireless energy transmission, it has to accept or decline. In this paper, we aim to find a proper packet delivery and energy transfer policy according to the system state that maximizes the data transmission efficiency of the system. We first formulate the problem as a Markov Decision Process (MDP) to tackle the successive decision issues, to optimize a utility for each node upon a casual environment. As the MDP formalism achieves its limits when the interactions between different nodes are considered, we formulate the problem as a Graph-based MDP (GMDP). The transition functions and rewards are then decomposed into local functions, and a graph illustrates the dependency{\textquoteright} relations among the nodes. To obtain the optimal policy despite the system{\textquoteright}s variations, Mean-Field Approximation (MFA) and Approximate linear-programming (ALP) algorithms were proposed to solve the GMDP problem. {\textcopyright} 2020 Elsevier B.V.}, keywords = {Antennas, Approximate linear programming, Approximation algorithms, Behavioral research, Data communication systems, Data transfer, Data transmission efficiency, Decision making, Decision making analysis, Decision-making problem, Energy harvesting, Energy resources, Energy transfer, Graph theory, Graphic methods, Inductive power transmission, Internet of things, Linear programming, Markov Decision Processes, Markov processes, Mean field approximation, Unmanned aerial vehicles (UAV), Wireless energy transfers, Wireless power transfer (WPT)}, doi = {10.1016/j.simpat.2020.102102}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85084066274\&doi=10.1016\%2fj.simpat.2020.102102\&partnerID=40\&md5=6a1c9e107244a170a782e61d0f9755cc}, author = {Lhazmir, S. and Oualhaj, O.A. and Kobbane, A. and Mokdad, L.} } @article {ElIdrissi2020379, title = {Deep Learning for Blood Glucose Prediction: CNN vs LSTM}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {12250 LNCS}, year = {2020}, note = {cited By 6}, pages = {379-393}, abstract = {To manage their disease, diabetic patients need to control the blood glucose level (BGL) by monitoring it and predicting its future values. This allows to avoid high or low BGL by taking recommended actions in advance. In this study, we propose a Convolutional Neural Network (CNN) for BGL prediction. This CNN is compared with Long-short-term memory (LSTM) model for both one-step and multi-steps prediction. The objectives of this work are: 1) Determining the best configuration of the proposed CNN, 2) Determining the best strategy of multi-steps forecasting (MSF) using the obtained CNN for a prediction horizon of 30~min, and 3) Comparing the CNN and LSTM models for one-step and multi-steps prediction. Toward the first objective, we conducted series of experiments through parameter selection. Then five MSF strategies are developed for the CNN to reach the second objective. Finally, for the third objective, comparisons between CNN and LSTM models are conducted and assessed by the Wilcoxon statistical test. All the experiments were conducted using 10 patients{\textquoteright} datasets and the performance is evaluated through the Root Mean Square Error. The results show that the proposed CNN outperformed significantly the LSTM model for both one-step and multi-steps prediction and no MSF strategy outperforms the others for CNN. 
{\textcopyright} 2020, Springer Nature Switzerland AG.}, keywords = {Blood, Blood glucose, Blood glucose level, Convolutional neural networks, Deep learning, Diabetic patient, Disease control, Forecasting, Glucose, Long short-term memory, Mean square error, Multi-step, Parameter selection, Prediction horizon, Root mean square errors}, doi = {10.1007/978-3-030-58802-1_28}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85093089262\&doi=10.1007\%2f978-3-030-58802-1_28\&partnerID=40\&md5=f16d576a801dcdbbac35f8593e4a50ea}, author = {El Idrissi, T. and Idri, A.} } @article {Mohamed2020335, title = {Detecting text in license plates using a novel MSER-based method}, journal = {International Journal of Data Analysis Techniques and Strategies}, volume = {12}, number = {4}, year = {2020}, note = {cited By 2}, pages = {335-348}, abstract = {A new license plate detection method is proposed in this paper. The proposed approach consists of three steps: the first step aims to delete some details in the input image by converting it to a grey-level image and inverse it (negative) and then use MSER for the extraction of text in candidate regions. The second step is based on a dynamic grouped DBSCAN algorithm for a fast classification of the connected region, and the outer tangent of circles intersections for filtering regions with the same orientations. Finally, a geometrical and statistical character filter is used to eliminate false detections in the third step. Experimental results show that our approach performs better and achieves a better detection than that proposed by Yin et al. (2014). {\textcopyright} 2020 Inderscience Enterprises Ltd.}, doi = {10.1504/IJDATS.2020.111488}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85097276428\&doi=10.1504\%2fIJDATS.2020.111488\&partnerID=40\&md5=ec0868e89fae7b8ade5bbd67748009dd}, author = {Mohamed, A. and Sanaa, E.F. and Faizi, R.} } @article {Mensou2020, title = {A direct power control of a DFIG based-WECS during symmetrical voltage dips}, journal = {Protection and Control of Modern Power Systems}, volume = {5}, number = {1}, year = {2020}, note = {cited By 42}, abstract = {The Wind Energy Conversion System (WECS) based Doubly Fed Induction Generator (DFIG) has experienced a rapid development in the world, which leads to an increasing insertion of this source of energy in the electrical grids. The sudden and temporary drop of voltage at the network can affect the operation of the DFIG; the voltage dips produce high peak currents on the stator and rotor circuits, without protection, the rotor side converter (RSC) will suffer also from over-current limit, consequently, the RSC may even be destroyed and the generator be damaged. In this paper a new Direct Power Control (DPC) method was developed, in order to control the stator powers and help the operation of the aero-generator during the faults grid; by injecting the reactive power into the network to contribute to the return of voltage, and set the active power to the optimum value to suppress the high peak currents. The DPC method was designed using the nonlinear Backstepping (BS) controller associated with the Lyapunov function to ensure the stability and robustness of the system. A comparison study was undertaken to verify the robustness and effectiveness of the DPC-BS to that of the classical vector control (VC) using Proportional-Integral (PI) correctors. All were simulated under the Simulink{\textregistered} software. 
{\textcopyright} 2020, The Author(s).}, keywords = {Aero-generator, Asynchronous generators, Backstepping, Backstepping technique, Doubly fed induction generators, DPC strategy, Electric equipment protection, Electric fault currents, Electric machine control, Electric power system control, Electric power transmission networks, Energy conversion, Lyapunov functions, Power control, Robustness (control systems), Rotorside converter (RSC), Stability and robustness, Stators, Two term control systems, Voltage dip, Wind energy conversion system, Wind power}, doi = {10.1186/s41601-019-0148-y}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078233240\&doi=10.1186\%2fs41601-019-0148-y\&partnerID=40\&md5=522588067926dd57f4d68f2b24a05345}, author = {Mensou, S. and Essadki, A. and Nasser, T. and Bououlid Idrissi, B.} } @article {Illi202013996, title = {On the Distribution of the Sum of M{\'a}laga-M Random Variables and Applications}, journal = {IEEE Transactions on Vehicular Technology}, volume = {69}, number = {11}, year = {2020}, note = {cited By 3}, pages = {13996-14000}, abstract = {In this paper, a very accurate approximation method for the statistics of the sum of M{\'a}laga-M random variates with pointing error (MRVs) is proposed. In particular, the probability density function of MRV is approximated by a Fox{\textquoteright}s H-function through the moment-based approach. Then, the respective moment-generating function of the sum of N MRVs is provided, based on which the average symbol error rate is evaluated for an N-branch maximal-ratio combining (MRC) receiver. The retrieved results show that the proposed approximate results match accurately with the exact simulated ones. Additionally, the results show that the achievable diversity order increases as a function of the number of MRC diversity branches. {\textcopyright} 1967-2012 IEEE.}, keywords = {Approximate results, Approximation methods, Average symbol error rate (SER), Diversity order, Error statistics, Maximal ratio combining (MRC) receivers, Moment generating function, Pointing errors, Probability density function, Random variates}, doi = {10.1109/TVT.2020.3025405}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85096323448\&doi=10.1109\%2fTVT.2020.3025405\&partnerID=40\&md5=983059a7dbdf860db83a81d1454d56fc}, author = {Illi, E. and Bouanani, F.E. and Ayoub, F.} } @article {Mrhar202072, title = {A dropout predictor system in moocs based on neural networks}, journal = {Journal of Automation, Mobile Robotics and Intelligent Systems}, volume = {14}, number = {4}, year = {2020}, note = {cited By 2}, pages = {72-80}, abstract = {Massive open online courses, MOOCs, are a recent phenomenon that has achieved a tremendous media attention in the online education world. Certainly, the MOOCs have brought interest among the learners (given the number of enrolled learners in these courses). Nevertheless, the rate of dropout in MOOCs is very important. Indeed, a limited number of the enrolled learners complete their courses. The high dropout rate in MOOCs is perceived by the educator{\textquoteright}s community as one of the most important problems. It{\textquoteright}s related to diverse aspects, such as the motivation of the learners, their expectations and the lack of social interactions. However, to solve this problem, it is necessary to predict the likelihood of dropout in order to propose an appropriate intervention for learners at-risk of dropping out their courses. 
In this paper, we present a dropout predictor model based on a neural network algorithm and a sentiment analysis feature, using clickstream log and forum post data. Our model achieved an average AUC (Area under the curve) as high as 90\% and the model with the learner{\textquoteright}s sentiment analysis feature attained an average increase in AUC of 0.5\%. {\textcopyright} 2020, Industrial Research Institute for Automation and Measurements. All rights reserved.}, doi = {10.14313/JAMRIS/4-2020/48}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85102757989\&doi=10.14313\%2fJAMRIS\%2f4-2020\%2f48\&partnerID=40\&md5=4cbaa0f7f458ce320541450af5ff8a46}, author = {Mrhar, K. and Douimi, O. and Abik, M.} } @article {Mensou20201759, title = {Dspace DS1104 implementation of a robust nonlinear controller applied for DFIG driven by wind turbine}, journal = {Renewable Energy}, volume = {147}, year = {2020}, note = {cited By 23}, pages = {1759-1771}, abstract = {This research paper deals with the control of a doubly fed induction generator (DFIG) driven in rotation by a wind turbine. The objective of this work is to control the electromagnetic torque of the generator and have a specified reactive stator power generated whatever the perturbations; the MPPT method was used to extract the maximum power for different wind speed variations. To control the studied system we have used the Nonlinear Backstepping Controller with a Lyapunov function; the robustness of our controller is tested in terms of reference tracking under perturbed conditions (generator parameter variations and voltage dips). All were implemented on the dSPACE DS1104 Controller card Real-Time Interface (RTI), which runs in the Simulink/MATLAB environment and ControlDesk 4.2 graphical interfaces. Hardware simulation results show and validate the performance and robustness of our controller. {\textcopyright} 2019 Elsevier Ltd}, keywords = {Asynchronous generators, Backstepping, Backstepping controller, Controllers, D-space, DFIG, Doubly fed induction generators, Electric fault currents, Electric machine control, Electromagnetic torques, energy planning, Lyapunov functions, Mppt algorithms, Non-linear controllers, Nonlinear backstepping, perturbation, research work, Robustness (control systems), rotation, torque, Tracking, Wind, Wind power, wind turbine, Wind turbines, wind velocity}, doi = {10.1016/j.renene.2019.09.042}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85072723940\&doi=10.1016\%2fj.renene.2019.09.042\&partnerID=40\&md5=2c1b6dd07049aedcc235d2f82f481f9e}, author = {Mensou, S. and Essadki, A. and Nasser, T. and Idrissi, B.B. and Ben Tarla, L.} } @article {MENSOU20201759, title = {Dspace DS1104 implementation of a robust nonlinear controller applied for DFIG driven by wind turbine}, journal = {Renewable Energy}, volume = {147}, year = {2020}, pages = {1759-1771}, abstract = {This research paper deals with the control of a doubly fed induction generator (DFIG) driven in rotation by a wind turbine. The objective of this work is to control the electromagnetic torque of the generator and have a specified reactive stator power generated whatever the perturbations; the MPPT method was used to extract the maximum power for different wind speed variations. To control the studied system we have used the Nonlinear Backstepping Controller with a Lyapunov function; the robustness of our controller is tested in terms of reference tracking under perturbed conditions (generator parameter variations and voltage dips). 
All were implemented on the dSPACE DS1104 Controller card Real-Time Interface (RTI), which runs in the Simulink/MATLAB environment and ControlDesk 4.2 graphical interfaces. Hardware simulation results show and validate the performance and robustness of our controller.}, keywords = {Backstepping controller, DFIG, dSPACE board, MPPT algorithm, Wind energy}, issn = {0960-1481}, doi = {https://doi.org/10.1016/j.renene.2019.09.042}, url = {https://www.sciencedirect.com/science/article/pii/S0960148119313722}, author = {Sara Mensou and Ahmed Essadki and Tamou Nasser and Badre Bououlid Idrissi and Lahssan Ben Tarla} } @article {Merabet2020472, title = {A dynamic model for human thermal comfort for smart building applications}, journal = {Proceedings of the Institution of Mechanical Engineers. Part I: Journal of Systems and Control Engineering}, volume = {234}, number = {4}, year = {2020}, note = {cited By 3}, pages = {472-483}, abstract = {Thermal comfort is closely related to the evaluation of heating, ventilation, and air conditioning systems. It can be seen as the result of the perception of the occupants of a given environment, and it is the product of the interaction of a number of personal and environmental factors. Nevertheless, comfort issues still do not play an important role in the daily operation of commercial buildings. However, in the workplace, local environmental quality affects, in addition to health, the productivity, which has a significant impact on the performance of activities. In this regard, researchers have conducted, for decades, investigations related to thermal comfort and indoor environments, which include developing models and indices through experimentation to establish standards to evaluate comfort and factors and set-up parameters for heating, ventilation, and air conditioning systems. However, to the best of our knowledge, most of the research work reported in the literature deals only with parameters that are not dynamically tracked. This work aims to propose a prototype for comfort measurement through a wireless sensor network and then to present a model for thermal comfort prediction. The developed model can be used to set up a heating, ventilation, and air conditioning system to meet the expected comfort level. In particular, the obtained results show that there is a strong correlation between users{\textquoteright} comfort and variables such as age, gender, and body mass index as a function of height and weight. {\textcopyright} IMechE 2019.}, keywords = {Air conditioning, Anthropometric parameters, Building applications, Commercial building, Dynamic models, Environmental factors, Heating, Human thermal comfort, Indoor environment, Intelligent buildings, Office buildings, Strong correlation, Thermal comfort, Thermal sensations, Ventilation, Wireless sensor networks}, doi = {10.1177/0959651819865795}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85070370072\&doi=10.1177\%2f0959651819865795\&partnerID=40\&md5=c43c4ba3e481928391ac3b08d4bea6df}, author = {Merabet, G.H. and Essaaidi, M. and Benhaddou, D.} } @conference {Berahhou2020, title = {Dynamic vehicle routing problem with simultaneous delivery and pickup: Formulation and resolution}, booktitle = {Proceedings - 2020 5th International Conference on Logistics Operations Management, GOL 2020}, year = {2020}, note = {cited By 3}, abstract = {This article treats a fundamental optimization problem encountered by most distribution companies. 
The Dynamic Vehicle Routing Problem with Simultaneous Delivery and Pickup (DVRPSDP) is a special variant of the VRP, in which new clients come when the working day has already begun and a number of vehicles have started their tours, in a way that each customer requires simultaneous delivery and pickup. These new customers must be included in the planned tours as early as possible with a minimum total travel distance. A rich literature review of the problem is carried out. A mathematical model has been formulated for the DVRPSDP, and it is tested with Cplex to provide optimal solutions for small instances. A memetic algorithm has been used to solve the problem; the proposed algorithm is a combination of the genetic algorithm with a local search method. {\textcopyright} 2020 IEEE.}, keywords = {Distribution companies, Dynamic vehicle routing problems, Genetic algorithms, Literature reviews, Local search method, Memetic algorithms, Number of vehicles, Optimal solutions, Optimization problems, Pickups, Vehicle routing, Vehicles}, doi = {10.1109/GOL49479.2020.9314759}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85100214538\&doi=10.1109\%2fGOL49479.2020.9314759\&partnerID=40\&md5=103d048003f32e61f1cb306492ddc85f}, author = {Berahhou, A. and Benadada, Y.} } @conference {Zaazaa2020, title = {Dynamic vulnerability detection approaches and tools: State of the Art}, booktitle = {4th International Conference on Intelligent Computing in Data Sciences, ICDS 2020}, year = {2020}, note = {cited By 0}, abstract = {Vulnerabilities are everywhere around us. Every device we use in our daily life includes software that may contain vulnerabilities. The growing use of software and devices to automate some of our daily life actions is making these programs more complex and more connected to the internet, which increases the risk of cyber-attacks. To reduce this risk, multiple programming companies are trying to use different approaches to find these vulnerabilities. Some are using static approaches during the software development life cycle while others are using dynamic analysis approaches to find vulnerabilities once the application is correctly working. Unfortunately, both approaches still suffer from multiple limitations and still need improvement. In this paper, we discuss some of the most recent dynamic approaches and the efficiency of the tools that use them. {\textcopyright} 2020 IEEE.}, keywords = {Analysis approach, Application programs, Cyber-attacks, Daily lives, Data Science, Dynamic approaches, Intelligent computing, Network security, Software design, Software development life cycle, State of the art, Static approach, Vulnerability detection}, doi = {10.1109/ICDS50568.2020.9268686}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85098666133\&doi=10.1109\%2fICDS50568.2020.9268686\&partnerID=40\&md5=faef5d40937e4c62a3326a3576d0e8e8}, author = {Zaazaa, O. and El Bakkali, H.} } @conference {Lakhili201912, title = {Deformable 3D Shape Classification Using 3D Racah Moments and Deep Neural Networks}, booktitle = {Procedia Computer Science}, volume = {148}, year = {2019}, note = {cited By 0}, pages = {12-20}, doi = {10.1016/j.procs.2019.01.002}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062651075\&doi=10.1016\%2fj.procs.2019.01.002\&partnerID=40\&md5=4a812fcbc857dbf995b08a055c11fc9d}, author = {Lakhili, Z. and El Alami, A. and Mesbah, A. and Berrahou, A. 
and Qjidaa, H.} } @article {Badidi2018369, title = {A DaaS Based Framework for IoT Data Provisioning}, journal = {Advances in Intelligent Systems and Computing}, volume = {661}, year = {2018}, note = {cited By 0}, pages = {369-379}, abstract = {As a result of the remarkable advances in sensing and digital communication technologies, massive amounts of data are generated by Internet of Things (IoT) devices. In this paper, we propose a framework for data provisioning that relies on the Data-as-a-Service (DaaS) cloud delivery model. IoT Data is processed and aggregated at the edge of where it is generated, then, it is stored in the cloud. Since smart connected devices are proliferating at an unprecedented pace, we anticipate that the number of DaaS providers will also grow at the same rate. Therefore, data consumers will seek to find suitable DaaS providers that can provide them with high-quality data. DaaS Brokers will emerge to mediate between data providers and data consumers and help data consumers find appropriate DaaS providers using different ranking algorithms. We propose an algorithm to evaluate the offers of potential DaaS providers based on the quality-of-data (QoD) requirements of the data consumer and using fitness functions associated with each QoD dimension. {\textcopyright} 2018, Springer International Publishing AG.}, doi = {10.1007/978-3-319-67618-0_34}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029599349\&doi=10.1007\%2f978-3-319-67618-0_34\&partnerID=40\&md5=e76f069dc00af29313bd86f85b4adfb8}, author = {Badidi, E. and Routaib, H.} } @article {Badidi2018369, title = {A DaaS Based Framework for IoT Data Provisioning}, journal = {Advances in Intelligent Systems and Computing}, volume = {661}, year = {2018}, pages = {369-379}, doi = {10.1007/978-3-319-67618-0_34}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029599349\&doi=10.1007\%2f978-3-319-67618-0_34\&partnerID=40\&md5=e76f069dc00af29313bd86f85b4adfb8}, author = {Badidi, E. and Routaib, H.} } @conference {Korachi2018140, title = {Data driven maturity model for assessing smart cities}, booktitle = {ACM International Conference Proceeding Series}, year = {2018}, pages = {140-147}, doi = {10.1145/3289100.3289123}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85058660258\&doi=10.1145\%2f3289100.3289123\&partnerID=40\&md5=8908e2ab946a4871a6ec1101872def02}, author = {Korachi, Z. and Bounabat, B.} } @article {ElIdrissi20181142, title = {Data mining techniques in diabetes self-management: A systematic map}, journal = {Advances in Intelligent Systems and Computing}, volume = {746}, year = {2018}, pages = {1142-1152}, doi = {10.1007/978-3-319-77712-2_109}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045346296\&doi=10.1007\%2f978-3-319-77712-2_109\&partnerID=40\&md5=627778cf3f0df5fe749b6e457a0eef1f}, author = {El Idrissi, T. and Idri, A. and Bakkoury, Z.} } @article {Benhar20181208, title = {Data preprocessing for decision making in medical informatics: Potential and analysis}, journal = {Advances in Intelligent Systems and Computing}, volume = {746}, year = {2018}, pages = {1208-1218}, doi = {10.1007/978-3-319-77712-2_116}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045323949\&doi=10.1007\%2f978-3-319-77712-2_116\&partnerID=40\&md5=86d2827399aedd6683b71d8893a2d49c}, author = {Benhar, H. and Idri, A. 
and Fernandez-Aleman, J.L.} } @conference {Oualahj2018, title = {A Decentralized Control of Autonomous Delay Tolerant Networks: Multi Agents Markov Decision Processes Framework}, booktitle = {IEEE International Conference on Communications}, volume = {2018-May}, year = {2018}, doi = {10.1109/ICC.2018.8422718}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051430605\&doi=10.1109\%2fICC.2018.8422718\&partnerID=40\&md5=ea20d47bd369c11fe0555f1224db09f3}, author = {Oualahj, O.A. and Kobbane, A. and Ben-Othman, J.} } @article {Tamir2018475, title = {A decision support platform based on cross-sorting methods for the selection of modeling methods: Case of the hospital supply chain performance analysis}, journal = {International Journal of Advanced Computer Science and Applications}, volume = {9}, number = {10}, year = {2018}, pages = {475-484}, doi = {10.14569/IJACSA.2018.091058}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85057264376\&doi=10.14569\%2fIJACSA.2018.091058\&partnerID=40\&md5=86e7cd3123ccc3b5399a68fccad72235}, author = {Tamir, M. and Chiheb, R. and Ouzayd, F.} } @conference {Oussidi20181, title = {Deep generative models: Survey}, booktitle = {2018 International Conference on Intelligent Systems and Computer Vision, ISCV 2018}, volume = {2018-May}, year = {2018}, pages = {1-8}, doi = {10.1109/ISACV.2018.8354080}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050964855\&doi=10.1109\%2fISACV.2018.8354080\&partnerID=40\&md5=cd609163d931e7f6f77c9850ecabcf2c}, author = {Oussidi, A. and Elhassouny, A.} } @article {Fenjiro201820, title = {Deep reinforcement learning overview of the state of the art}, journal = {Journal of Automation, Mobile Robotics and Intelligent Systems}, volume = {12}, number = {3}, year = {2018}, pages = {20-39}, doi = {10.14313/JAMRIS_3-2018/15}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060533274\&doi=10.14313\%2fJAMRIS_3-2018\%2f15\&partnerID=40\&md5=179cf6788e2041bdac84079f25c37a3e}, author = {Fenjiro, Y. and Benbrahim, H.} } @article {Haddad201812, title = {Design of high gain novel dielectric resonator antenna array for 24 GHz short range radar systems}, journal = {Advanced Electromagnetics}, volume = {7}, number = {4}, year = {2018}, pages = {12-18}, doi = {10.7716/aem.v7i4.874}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85052828658\&doi=10.7716\%2faem.v7i4.874\&partnerID=40\&md5=f03b09d177b785f22ce0d744b8db6f72}, author = {Haddad, A. and Aoutoul, M. and Rais, K. and Essaaidi, M. and Faqir, M. and Bouya, M.} } @conference {Maleky2018428, title = {Design of simple printed Dipole antenna on flexible substrate for UHF band}, booktitle = {Procedia Manufacturing}, volume = {22}, year = {2018}, pages = {428-435}, doi = {10.1016/j.promfg.2018.03.067}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049256549\&doi=10.1016\%2fj.promfg.2018.03.067\&partnerID=40\&md5=b2c8132bde575104181fef44b8a91d6d}, author = {Maleky, O.E. and Abdelouahab, F.B. and Essaaidi, M. and Ennasar, M.A.} } @article {Hdioud201870, title = {Detecting and shadows in the HSV color space using dynamic thresholds}, journal = {Bulletin of Electrical Engineering and Informatics}, volume = {7}, number = {1}, year = {2018}, pages = {70-79}, doi = {10.11591/eei.v7i1.893}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045525834\&doi=10.11591\%2feei.v7i1.893\&partnerID=40\&md5=ca79c66bd666526ee7849c304d10f7ef}, author = {Hdioud, B. and El Haj Tirari, M. and Oulad Haj Thami, R. 
and Faizi, R.} } @conference {Sebbar2018583, title = {Detection MITM Attack in Multi-SDN Controller}, booktitle = {Colloquium in Information Science and Technology, CIST}, volume = {2018-October}, year = {2018}, pages = {583-587}, doi = {10.1109/CIST.2018.8596479}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85061446244\&doi=10.1109\%2fCIST.2018.8596479\&partnerID=40\&md5=04ea13257b17af4ec26de2fe0c9aeba7}, author = {Sebbar, A. and Boulmalf, M. and Dafir Ech-Cherif El Kettani, M. and Badd, Y.} } @article {Bustos-Vanegas2018839, title = {Developing predictive models for determining physical properties of coffee beans during the roasting process}, journal = {Industrial Crops and Products}, volume = {112}, year = {2018}, pages = {839-845}, doi = {10.1016/j.indcrop.2017.12.015}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039453221\&doi=10.1016\%2fj.indcrop.2017.12.015\&partnerID=40\&md5=b7818900c7e9d5412b48bb3e994b0da4}, author = {Bustos-Vanegas, J.D. and Corr{\^e}a, P.C. and Martins, M.A. and Baptestini, F.M. and Campos, R.C. and de Oliveira, G.H.H. and Nunes, E.H.M.} } @conference {Boudagdigue2018, title = {A Distributed Advanced Analytical Trust Model for IoT}, booktitle = {IEEE International Conference on Communications}, volume = {2018-May}, year = {2018}, doi = {10.1109/ICC.2018.8422726}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051430306\&doi=10.1109\%2fICC.2018.8422726\&partnerID=40\&md5=8907401b5d112341154ed5ed64157168}, author = {Boudagdigue, C. and Benslimane, A. and Kobbane, A. and Elmachkour, M.} } @conference {Saoud2018211, title = {DSS design for carrier collaboration using Big graph IOT}, booktitle = {2018 International Colloquium on Logistics and Supply Chain Management, LOGISTIQUA 2018}, year = {2018}, pages = {211-215}, doi = {10.1109/LOGISTIQUA.2018.8428266}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85052499129\&doi=10.1109\%2fLOGISTIQUA.2018.8428266\&partnerID=40\&md5=d4b142cb74b25e7b0dc1343eab03f24b}, author = {Saoud, A. and Bellabdaoui, A.} } @article {Illi201855345, title = {Dual-hop mixed RF-UOW communication system: A PHY Security Analysis}, journal = {IEEE Access}, volume = {6}, year = {2018}, pages = {55345-55360}, doi = {10.1109/ACCESS.2018.2870344}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85054276852\&doi=10.1109\%2fACCESS.2018.2870344\&partnerID=40\&md5=3402dea4c38b8cc6d781800616c3f6e5}, author = {Illi, E. and El Bouanani, F. and Da Costa, D.B. and Ayoub, F. and Dias, U.S.} } @conference {Idri2017245, title = {A Data Mining-Based Approach for Cardiovascular Dysautonomias Diagnosis and Treatment}, booktitle = {IEEE CIT 2017 - 17th IEEE International Conference on Computer and Information Technology}, year = {2017}, note = {cited By 0}, pages = {245-252}, abstract = {Autonomic nervous system (ANS) is a control system that acts largely unconsciously and regulates bodily functions. An autonomic malfunction can lead to serious problems related to blood pressure, heart, swallowing, breathing and others. A set of dynamic tests are therefore adopted in ANS units to diagnose and treat patients with cardiovascular dysautonomias. These tests generate big amount of data which are very well suited to be processed using data mining techniques. 
The purpose of this study is to develop a cardiovascular dysautonomias prediction system to identify the appropriate diagnosis and treatment for patients with cardiovascular dysautonomias using a dataset extracted from the ANS unit of the university hospital Avicenne in Morocco. Classification techniques and association rules were used for the diagnosis and treatment stages respectively. In fact, K-nearest neighbors, C4.5 decision tree algorithm, Random forest, Na{\"\i}ve bayes and Support vector machine were applied to generate the diagnosis classification models and Apriori algorithm was used for generating the association rules. The results obtained for each classifier were analyzed and compared to identify the most efficient one. {\textcopyright} 2017 IEEE.}, doi = {10.1109/CIT.2017.28}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032340028\&doi=10.1109\%2fCIT.2017.28\&partnerID=40\&md5=8c8a9ba277b01051d8429283af6bb318}, author = {Idri, A. and Kadi, I.} } @conference {ElAfia2017, title = {Data-driven based aircraft maintenance routing by markov decision process model}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {Aircraft maintenance routing is of basic significance to the safe and efficient operations of an airline. However, the timely efficiency of the airline flight schedule is susceptible to various factors during the daily operations. Air traffic often undergoes some random disruptions that expose maintenance routing to random flight delays, which have to be considered to ensure safe and operational flight schedule. The idea of data-driven methods was the focal point of much studies during a previous couple of years. Constrained Markov Decision process model was selected in this paper to remedy this problem and design the maintenance needs of an aircraft taking past data information into account. Maintenance actions are so modeled with stochastic state transitions. This can offer the opportunity to solve the maintenance routing problem deliberating and handling flight disturbances. Through computational tests on real data of a Moroccan airline company, we investigate the efficiency of this solution approach on history data sets. {\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090430}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028455379\&doi=10.1145\%2f3090354.3090430\&partnerID=40\&md5=b414d040a3e32989b0121144dbd1f7f3}, author = {El Afia, A. and Aoun, O.} } @conference {ElHaloui2017192, title = {A decision-support model enabling a proactive vision of Cloud Computing adoption}, booktitle = {Proceedings of 2016 International Conference on Cloud Computing Technologies and Applications, CloudTech 2016}, year = {2017}, note = {cited By 0}, pages = {192-198}, abstract = {Market competitiveness and technological evolution encourage companies to seek IT solutions enabling to save costs, increase productivity, and focus on high value-added activities. Outsourcing and cloud computing activities become increasingly adopted choices. That is why, decision makers need to look for decision support tools helping them for the adoption of new solutions and services such as Cloud Computing. This article presents a maturity assessment model of a functional block information system in order to outsource it to Cloud. 
Basing on the enterprise architecture point of view, best practices repositories, basic criteria of activities outsourcing, and the cloud computing adoption requirements, this model is built around four modules: the block scope identification, the outsourcing opportunity, the cloud computing architectural requirements, and governance and control. Thus, this model will provide new benefits for opened Enterprise Architectures to cloud computing. {\textcopyright} 2016 IEEE.}, doi = {10.1109/CloudTech.2016.7847698}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013765957\&doi=10.1109\%2fCloudTech.2016.7847698\&partnerID=40\&md5=2afd11beb0ac88d120610b32d6878cb2}, author = {El Haloui, M. and Kriouile, A.} } @article {Laghouaouta2017142, title = {A dedicated approach for model composition traceability}, journal = {Information and Software Technology}, volume = {91}, year = {2017}, note = {cited By 0}, pages = {142-159}, abstract = {Context: Software systems are often too complex to be expressed by a single model. Recognizing this, the Model Driven Engineering (MDE) proposes multi-modeling approaches to allow developers to describe a system from different perspectives. In this context, model composition has become important since the combination of those partial representations is inevitable. Nevertheless, no approach has been defined for keeping track of the composition effects, and this operation has been overshadowed by model transformations. Objective This paper presents a traceability approach dedicated to the composition of models. Two aspects of quality are considered: producing relevant traces; and dealing with scalability. Method The composition of softgoal trees has been selected to motivate the need for tracing the composition of models and to illustrate our approach. The base principle is to augment the specification of the composition with the behavior needed to generate the expected composed model accompanied with a trace model. This latter includes traces of the execution details. For that, traceability is considered as a crosscutting concern and encapsulated in an aspect. As part of the proposal, an Eclipse plug-in has been implemented as a tool support. Besides, a comparative experiment has been conducted to assess the traces relevance. We also used the regression method to validate the scalability of the tool support. Results Our experiments show that the proposed approach allows generating relevant traces. In addition, the obtained results reveal that tracing a growing number of elements causes an acceptable increase of response time. Conclusion This paper presents a traceability approach dedicated to the composition of models and its application to softgoal trees. The experiment results reveal that our proposal considers the composition specificities for producing valuable traceability information while supporting scalability. {\textcopyright} 2017 Elsevier B.V.}, doi = {10.1016/j.infsof.2017.07.002}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85021928111\&doi=10.1016\%2fj.infsof.2017.07.002\&partnerID=40\&md5=efd71b14c98f0091be2f0f61bbb50a3f}, author = {Laghouaouta, Y. and Anwar, A. and Nassar, M. 
and Coulette, B.} } @conference {Benlakhdar2017, title = {Directional data classification using a hierarchical model of von Mises distribution}, booktitle = {ACM International Conference Proceeding Series}, volume = {Part F129474}, year = {2017}, note = {cited By 0}, abstract = {The von Mises distribution (vM-pdf) is a continuous probability distribution on the circle used in directional statistics. A mixture model of von Mises distributions is broad enough to cover symmetry as well as asymmetry, and unimodality as well as multimodality, of circular data. In this paper we use a hierarchical von Mises mixture distribution model (HmvM-pdf), where we consider that each class is itself the result of a mixture of subclasses. The parameters of our model are estimated using a modified expectation maximization (EM) algorithm. The HmvM-pdf model achieves higher accuracy than the mvM model and offers richer modeling. The suitability of the distributions is judged from the coefficient of determination R2. {\textcopyright} 2017 Association for Computing Machinery.}, doi = {10.1145/3090354.3090425}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028459799\&doi=10.1145\%2f3090354.3090425\&partnerID=40\&md5=ce3dab33a54f11b997bc9f8f24999c78}, author = {Benlakhdar, S. and Rziza, M. and Thami, R.O.H.} } @conference {Bouzbita2017344, title = {Dynamic adaptation of the ACS-TSP local pheromone decay parameter based on the Hidden Markov Model}, booktitle = {Proceedings of 2016 International Conference on Cloud Computing Technologies and Applications, CloudTech 2016}, year = {2017}, note = {cited By 1}, pages = {344-349}, abstract = {The objective of the present paper is to propose an improved Ant Colony System (ACS) algorithm based on a Hidden Markov Model (HMM) so as to dynamically adapt the local pheromone decay parameter ξ. The proposed algorithm uses Iteration and Diversity as indicators of the hidden states in the search space in ACS. To test the efficiency of our algorithm, we experimented with it on several benchmark Travelling Salesman Problem (TSP) instances. The results have proven the effectiveness of our algorithm in both the convergence speed and the solution quality. {\textcopyright} 2016 IEEE.}, doi = {10.1109/CloudTech.2016.7847719}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013861130\&doi=10.1109\%2fCloudTech.2016.7847719\&partnerID=40\&md5=26cafb2d23ee70bf2f3552d7fec22e8a}, author = {Bouzbita, S. and El Afia, A. and Faizi, R. and Zbakh, M.} } @article {Chaker20176182, title = {The dynamic adaptive sustainability balanced scorecard: A new framework for a sustainability-driven strategy}, journal = {International Journal of Applied Engineering Research}, volume = {12}, number = {16}, year = {2017}, note = {cited By 0}, pages = {6182-6191}, abstract = {The Sustainability Balanced Scorecard (SBSC) has been widely recognized as a valuable decision aid approach in the management of sustainability. Controversy is nevertheless still prevalent on which SBSC architecture is most suitable for which organizational context. Moreover, the literature unveils some structural flaws in SBSC design methodologies that remain up till now fundamentally based on intuitionist mental models and subjective judgment. Building upon existing critical evaluations and the gaps to be addressed, we propose in this paper a new sustainability decision aid framework based on a novel combination of Multi-Criteria Decision Making methods (MCDM), Fuzzy logic, and System Dynamics modeling. 
The resulting framework, denoted as the Dynamic Adaptive SBSC (D-ASBSC), offers the advantage of high adaptability, comprehensiveness, and unbiased methodical cause-effect relationship construction. In addition, thanks to a systematic rule-based generation of the causal loop diagram and the stock-flow chart, the D-ASBSC makes it possible to effectively remedy the infamous mental models restriction that has long been raised in System Dynamics modeling. {\textcopyright} Research India Publications.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029754284\&partnerID=40\&md5=2b1805b81731ebbb7b35691824759d36}, author = {Chaker, F. and Manouar, A.E. and Idrissi, M.A.J.} } @conference {Harbouche2017, title = {Dynamic coalitional matching game approach for fair and swift data-gathering in wireless body sensor networks}, booktitle = {Proceedings - 2017 International Conference on Wireless Networks and Mobile Communications, WINCOM 2017}, year = {2017}, note = {cited By 0}, abstract = {Wireless Sensor Networks are deployed in different fields of application to gather data on the monitored environment. The Wireless Body Sensor Network (WBSN) is a wireless sensor network designed to monitor human body vital signs and environmental parameters. The design and development of such WBSN systems for health monitoring have been motivated by costly healthcare and propelled by the development of miniature health monitoring devices. This paper presents the architecture design of a preventive health care monitoring system. This architecture is designed for monitoring multiple patients in a hospital. It is based on a set of mobile data collectors and static sensors for the analysis of various patient parameters. The data collectors need to cooperate in order to gather the data from the sensor nodes. The point of this paper is how to dynamically and effectively appoint and deploy several data collectors in the hospital to gather the measured data in minimal time. We formulate the problem as a coalitional matching game between patients and data collectors, and we propose a patient-data collector association algorithm that ensures fairness and minimum total course in the stable matchings. {\textcopyright} 2017 IEEE.}, doi = {10.1109/WINCOM.2017.8238179}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041451720\&doi=10.1109\%2fWINCOM.2017.8238179\&partnerID=40\&md5=44ef24b18e66b8fb24573d73db99025f}, author = {Harbouche, A. and Elmachkour, M. and Djedi, N. and Erradi, M. and Kobbane, A.} } @conference {Assila2017, title = {A dynamic Stackelberg-Cournot game for competitive content caching in 5G networks}, booktitle = {Proceedings - 2017 International Conference on Wireless Networks and Mobile Communications, WINCOM 2017}, year = {2017}, note = {cited By 0}, abstract = {The main concept behind the 5G mobile network is to expand the idea of the small cell network (SCN) to create a cooperative network able to cache data in active nodes inside the radio access and core network. Caching techniques are a workaround to deal with the bottleneck in the Back-haul, as the capacity of the wireless links could not support the increasing demand for rich multimedia. In this perspective, multiple content providers are in competition for the caching space of network operator base stations. In fact, the caching space is a limited resource due to the exponential traffic of mobile data and video consumption. 
It is in this perspective that mobile operators and content providers find themselves linked in this profit-generating market, and consequently also linked in the cache allocation and price setting issues. In this paper we propose a multi-Stackelberg game between multiple MNOs (leaders) and several CPs (followers) computing under the Cournot-Nash assumption. In the first step, a multi-leader Stackelberg game between multiple MNOs, considered as the leaders, aims to define the price they charge the CPs to maximize their profit. In the second step, in a multi-follower Cournot game, the CPs, considered as the followers, compete to increase the space quantity they cache at the MNOs{\textquoteright} small base stations (SBS) to also maximize their profit and to improve the quality of service (QoS) of their users. Our goal is to find the price the MNOs will set and the quantity of contents that each CP will cache. In the pricing game, each MNO first sets the price. Then the CPs react with proposed quantities of space to cache. Thereafter, the MNO again sets an optimal price according to the prediction of each CP{\textquoteright}s optimal strategies. Numerical results describe the structure of the Nash equilibrium and the optimal prices resulting from the MNOs{\textquoteright} and CPs{\textquoteright} optimal strategies. {\textcopyright} 2017 IEEE.}, doi = {10.1109/WINCOM.2017.8238184}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041439197\&doi=10.1109\%2fWINCOM.2017.8238184\&partnerID=40\&md5=36792f2ffea42a36227d37ccdf8af78b}, author = {Assila, B. and Kobbane, A. and Elmachkour, M. and El Koutbi, M.} } @article {Idri20161, title = {Dealing with missing values in software project datasets: A systematic mapping study}, journal = {Studies in Computational Intelligence}, volume = {653}, year = {2016}, note = {cited By 0}, pages = {1-16}, abstract = {Missing Values (MV) present a serious problem facing research in software engineering (SE) which is mainly based on statistical and/or data mining analysis of SE data. Therefore, various techniques have been developed to deal adequately with MV. In this paper, a systematic mapping study was carried out to summarize the existing techniques dealing with MV in SE datasets and to classify the selected studies according to six classification criteria: research type, research approach, MV technique, MV type, data types and MV objective. Publication channels and trends were also identified. As results, 35 papers concerning MV treatments of SE data were selected. This study shows an increasing interest in machine learning (ML) techniques especially the K-nearest neighbor algorithm (KNN) to deal with MV in SE datasets and found that most of the MV techniques are used to serve software development effort estimation techniques. 
{\textcopyright} Springer International Publishing Switzerland 2016.}, doi = {10.1007/978-3-319-33810-1_1}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84969232970\&doi=10.1007\%2f978-3-319-33810-1_1\&partnerID=40\&md5=bcf59f4d24258293351f5636660fd78e}, author = {Idri, A.a and Abnane, I.a and Abran, A.b} } @conference { ISI:000389715100001, title = {Dealing with Missing Values in Software Project Datasets: A Systematic Mapping Study}, booktitle = {SOFTWARE ENGINEERING, ARTIFICIAL INTELLIGENCE, NETWORKING AND PARALLEL/DISTRIBUTED COMPUTING}, series = {Studies in Computational Intelligence}, volume = {653}, year = {2016}, note = {17th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD), Shanghai, PEOPLES R CHINA, MAY 30-JUN 01, 2016}, pages = {1-16}, publisher = {IEEE; Int Assoc Comp \& Informat Sci; SSCTL; IEEE Comp Soc; Cent Michigan Univ, Software Engn \& Informat Technol Inst; Shanghai Univ; Shanghai Key Lab Comp Software Testing \& Evaluating}, organization = {IEEE; Int Assoc Comp \& Informat Sci; SSCTL; IEEE Comp Soc; Cent Michigan Univ, Software Engn \& Informat Technol Inst; Shanghai Univ; Shanghai Key Lab Comp Software Testing \& Evaluating}, abstract = {Missing Values (MV) present a serious problem facing research in software engineering (SE) which is mainly based on statistical and/or data mining analysis of SE data. Therefore, various techniques have been developed to deal adequately with MV. In this paper, a systematic mapping study was carried out to summarize the existing techniques dealing with MV in SE datasets and to classify the selected studies according to six classification criteria: research type, research approach, MV technique, MV type, data types and MV objective. Publication channels and trends were also identified. As results, 35 papers concerning MV treatments of SE data were selected. This study shows an increasing interest in machine learning (ML) techniques especially the K-nearest neighbor algorithm (KNN) to deal with MV in SE datasets and found that most of the MV techniques are used to serve software development effort estimation techniques.}, isbn = {978-3-319-33810-1; 978-3-319-33809-5}, issn = {1860-949X}, doi = {10.1007/978-3-319-33810-1\_1}, author = {Idri, Ali and Abnane, Ibtissam and Abran, Alain}, editor = {Lee, R} } @article {Belhiah20161, title = {Decision support system for implementing data quality projects}, journal = {Communications in Computer and Information Science}, volume = {584}, year = {2016}, note = {cited By 0}, pages = {1-16}, abstract = {The new data-oriented shape of organizations inevitably imposes the need for the improvement of their data quality (DQ). In fact, growing data quality initiatives are offering increased monetary and non-monetary benefits for organizations. These benefits include increased customer satisfaction, reduced operating costs and increased revenues. However, regardless of the numerous initiatives, there is still no globally accepted approach for evaluating data quality projects in order to build the optimal business cases taking into account the benefits and the costs. This paper presents a model to clearly identify the opportunities for increased monetary and non-monetary benefits from improved data quality within an Enterprise Architecture context. 
The aim of this paper is to measure, in a quantitative manner, how key business processes help to execute an organization{\textquoteright}s strategy and then to qualify the benefits as well as the complexity of improving data, that are consumed and produced by these processes. These findings will allow to select data quality improvement projects, based on the latter{\textquoteright}s benefits to the organization and their costs of implementation. To facilitate the understanding of this approach, a Java EE Web application is developed and presented here. {\textcopyright} Springer International Publishing Switzerland 2016.}, doi = {10.1007/978-3-319-30162-4_1}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84961133327\&doi=10.1007\%2f978-3-319-30162-4_1\&partnerID=40\&md5=76393a1237969723e0231c6308362426}, author = {Belhiah, M. and Benqatla, M.S. and Bounabat, B.} } @conference { ISI:000389502600039, title = {Deep Neural Networks for Medical Images}, booktitle = {Networked Systems, NETYS 2016}, series = {Lecture Notes in Computer Science}, volume = {9944}, year = {2016}, note = {4th International Conference on Networked Systems (NETYS), Marrakech, MOROCCO, MAY 18-20, 2016}, pages = {382}, isbn = {978-3-319-46140-3; 978-3-319-46139-7}, issn = {0302-9743}, author = {Elaalyani, Issam and Erradi, Mohammed}, editor = {Abdulla, PA and DelporteGallet, C} } @conference {Aylaj2016557, title = {Degeneration simulated annealing algorithm for combinatorial optimization problems}, booktitle = {International Conference on Intelligent Systems Design and Applications, ISDA}, volume = {2016-June}, year = {2016}, note = {cited By 0}, pages = {557-562}, abstract = {In this paper, we use the physical aspect of the simulated annealing method in order to propose a modified simulated annealing algorithm. The main idea of the algorithm is based to find the optimal solution of a combinatorial optimization problem by switching between two different subsystems of treatment; using so called degeneration of atoms energy. This new algorithm is named Degeneration Simulated Annealing (DSA) algorithm. To illustrate the effectiveness of DSA, it is applied to solve the problems of the minimum distance and the decoding, in coding theory. The computational experiment results obtained by DSA are very interesting. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ISDA.2015.7489177}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978430456\&doi=10.1109\%2fISDA.2015.7489177\&partnerID=40\&md5=0471da6bb951204df77f8f7a184b034d}, author = {Aylaj, B.a and Belkasmi, M.b and Zouaki, H.a and Berkani, A.b} } @conference { ISI:000385280000048, title = {On the Deployment Quality for Multi-intrusion Detection in Wireless Sensor Networks}, booktitle = {PROCEEDINGS OF THE MEDITERRANEAN CONFERENCE ON INFORMATION \& COMMUNICATION TECHNOLOGIES 2015 (MEDCT 2015), VOL 2}, series = {Lecture Notes in Electrical Engineering}, volume = {381}, year = {2016}, note = {Mediterranean Conference on Information and Communication Technologies (MedCT), Saidia, MOROCCO, MAY 07-09, 2015}, pages = {469-478}, abstract = {The intrusion detection in a Wireless Sensor Network is defined as a mechanism to monitor and detect any intruder in a sensing area. The sensor deployment quality is a critical issue since it reflects the cost and detection capability of a wireless sensor network. 
When the random deployment is required, which sensor nodes are uniformly randomly distributed over on surface area, determining the deployment quality becomes challenging. In the intrusion detection application, it is necessary to define more precise measures of sensing range and node density that impact overall system performance. To enhance the detection quality for single/multi intrusion, a probabilistic intrusion detection models are adopted, called single and multi sensing probability detection and the deployment quality issue is surveyed and analysed in term of coverage.}, isbn = {978-3-319-30298-0; 978-3-319-30296-6}, issn = {1876-1100}, doi = {10.1007/978-3-319-30298-0\_48}, author = {Assad, Noureddine and Elbhiri, Brahim and Faqihi, My Ahmed and Ouadou, Mohamed and Aboutajdine, Driss}, editor = {ElOualkadi, A and Choubani, F and ElMoussati, A} } @article {Assad2016469, title = {On the deployment quality for multi-intrusion detection in wireless sensor networks}, journal = {Lecture Notes in Electrical Engineering}, volume = {381}, year = {2016}, note = {cited By 0}, pages = {469-478}, abstract = {The intrusion detection in a Wireless Sensor Network is defined as a mechanism to monitor and detect any intruder in a sensing area. The sensor deployment quality is a critical issue since it reflects the cost and detection capability of a wireless sensor network. When the random deployment is required, which sensor nodes are uniformly randomly distributed over on surface area, determining the deployment quality becomes challenging. In the intrusion detection application, it is necessary to define more precise measures of sensing range and node density that impact overall system performance. To enhance the detection quality for single/multi intrusion, a probabilistic intrusion detection models are adopted, called single and multi sensing probability detection and the deployment quality issue is surveyed and analysed in term of coverage. {\textcopyright} Springer International Publishing Switzerland 2016.}, doi = {10.1007/978-3-319-30298-0_48}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964066188\&doi=10.1007\%2f978-3-319-30298-0_48\&partnerID=40\&md5=5dca2bc944d09de42dd56334a955161e}, author = {Assad, N.a and Elbhiri, B.b and Faqihi, M.A.c and Ouadou, M.a and Aboutajdine, D.a} } @article { ISI:000389653700013, title = {Dominant Multipoint Relaying Method for Efficient Proactive Routing Schema}, journal = {AD HOC \& SENSOR WIRELESS NETWORKS}, volume = {33}, number = {1-4}, year = {2016}, pages = {321-338}, abstract = {The performance of proactive routing protocol in the context of mobile ad hoc networks (MANET) depends on broadcasting schemas. They are essential to build up an efficient topology knowledge which is required to compute valid routes to any destination inside network. In this paper, we focus on neighbor knowledge broadcasting especially on multipoint relaying concept. We have modeled the problem of multipoint relaying nodes selection as a set cover problem and customized the domain of feasible solution and the target set to be covered based on greedy approach and neighborhood relation. To assess the performance of our proposal referred as Enhanced Dominating Multipoint Relaying (EDMPR) method we have implemented it in the context of OLSR. Our findings suggest that the proposed method enables broadcasting signaling packets (i.e. 
topology messages) packets in a faster manner with a reduced number of broadcasting nodes.}, issn = {1551-9899}, author = {Bachir, Bouamoud and Ahmed, Habbani and Zouhair, Guennoun} } @article {Bachir2016321, title = {Dominant multipoint relaying method for efficient proactive routing schema}, journal = {Ad-Hoc and Sensor Wireless Networks}, volume = {33}, number = {1-4}, year = {2016}, note = {cited By 0}, pages = {321-338}, abstract = {The performance of proactive routing protocol in the context of mobile ad hoc networks (MANET) depends on broadcasting schemas. They are essential to build up an efficient topology knowledge which is required to compute valid routes to any destination inside network. In this paper, we focus on neighbor knowledge broadcasting especially on multipoint relaying concept. We have modeled the problem of multipoint relaying nodes selection as a set cover problem and customized the domain of feasible solution and the target set to be covered based on greedy approach and neighborhood relation. To assess the performance of our proposal referred as Enhanced Dominating Multipoint Relaying (EDMPR) method we have implemented it in the context of OLSR. Our findings suggest that the proposed method enables broadcasting signaling packets (i.e. topology messages) packets in a faster manner with a reduced number of broadcasting nodes. {\textcopyright} 2016 Old City Publishing, Inc.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84996743438\&partnerID=40\&md5=4b6e635684dcd4a5121acf22d335fa2a}, author = {Bachir, B.a and Ahmed, H.a b and Zouhair, G.b} } @article {11984480720160715, title = {Dominant Multipoint Relaying Method for Efficient Proactive Routing Schema.}, journal = {Adhoc \& Sensor Wireless Networks}, volume = {33}, number = {1-4}, year = {2016}, pages = {321 - 338}, abstract = {The performance of proactive routing protocol in the context of mobile ad hoc networks (MANET) depends on broadcasting schemas. They are essential to build up an efficient topology knowledge which is required to compute valid routes to any destination inside network. In this paper, we focus on neighbor knowledge broadcasting especially on multipoint relaying concept. We have modeled the problem of multipoint relaying nodes selection as a set cover problem and customized the domain of feasible solution and the target set to be covered based on greedy approach and neighborhood relation. To assess the performance of our proposal referred as Enhanced Dominating Multipoint Relaying (EDMPR) method we have implemented it in the context of OLSR. Our findings suggest that the proposed method enables broadcasting signaling packets (i.e. topology messages) packets in a faster manner with a reduced number of broadcasting nodes. 
}, keywords = {Ad hoc networks (Computer networks), Broadcast storm, Data packets \& packeting, mobile ad hoc network, Multipoint distribution service, Multipoint relaying, neighbor knowledge broadcast, proactive routing, Routing (Computer network management), Self-organizing systems}, issn = {15519899}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=119844807\&site=ehost-live}, author = {Bachir, Bouamoud and Ahmed, Habbani and Zouhair, Guennoun} } @article {Ouahabi201658, title = {Dual-band band stop filter based on complementary split ring resonator}, journal = {International Journal of Microwave and Optical Technology}, volume = {11}, number = {1}, year = {2016}, note = {cited By 0}, pages = {58-63}, abstract = {This paper describes the design of dual-band band stop filter (DBBSF). This architecture use complementary split ring resonator to tune the band stop frequencies and the filter characteristics are improved using three open stubs. The CSRR is used to reject two frequency responses at 2.45GHz and 5.8GHz, operating in the Industrial Scientific Medical (ISM), and the open stubs are employed to improve the return loss between the two frequency responses. To validate the design concept, a prototype of DBBSF was fabricated and evaluated. The measured frequency responses agree well with the simulation results, validating our proposed design. {\textcopyright} 2016 IAMOT.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84957795026\&partnerID=40\&md5=812924442f8aa6943b13515c0beae228}, author = {Ouahabi, M.E.a and Zakriti, A.b and Essaaidi, M.c and Touhami, N.A.a} } @article { ISI:000351605500008, title = {Data traffic-based analysis of delay and energy consumption in cognitive radio networks with and without resource reservation}, journal = {INTERNATIONAL JOURNAL OF COMMUNICATION SYSTEMS}, volume = {28}, number = {7}, year = {2015}, month = {MAY 10}, pages = {1316-1328}, abstract = {A new opportunistic cross-layer MAC protocol involving channel allocation and packet scheduling for cognitive radio networks is proposed. Cognitive radio allows secondary users (SUs) to exploit the available portions of the licensed spectrum bands without interfering with primary users. In particular, we consider a cognitive radio system, where SUs are equipped with two transceivers: a control transceiver and a software-defined radio transceiver. Data traffic characteristics of SUs are considered to ameliorate system performance. So, we propose a mechanism of resource reservation to improve QoS requirements that favors successful SUs to transmit data during x time slots without interfering with primary users. The key novelty of this paper is giving priority for SUs with important data traffic and which frequently solicits data channels to transmit for the remaining time of the ongoing time slot and for the next time slots directly after checking the channel availability. We develop a new analytical model to evaluate delay parameter for two scenarios with and without resource reservation and we then investigate the impact of those scenarios on the energy consumption. We show through simulations that cognitive radio performances increase noticeably with the proposed scheme. 
Copyright (c) 2014 John Wiley \& Sons, Ltd.}, issn = {1074-5351}, doi = {10.1002/dac.2764}, author = {Elmachkour, Mouna and Kobbane, Abdellatif and Sabir, Essaid and Ben-Othman, Jalel and El Koutbi, Mohammed} } @article {Elmachkour20151316, title = {Data traffic-based analysis of delay and energy consumption in cognitive radio networks with and without resource reservation}, journal = {International Journal of Communication Systems}, volume = {28}, number = {7}, year = {2015}, note = {cited By 6}, pages = {1316-1328}, abstract = {A new opportunistic cross-layer MAC protocol involving channel allocation and packet scheduling for cognitive radio networks is proposed. Cognitive radio allows secondary users (SUs) to exploit the available portions of the licensed spectrum bands without interfering with primary users. In particular, we consider a cognitive radio system, where SUs are equipped with two transceivers: a control transceiver and a software-defined radio transceiver. Data traffic characteristics of SUs are considered to ameliorate system performance. So, we propose a mechanism of resource reservation to improve QoS requirements that favors successful SUs to transmit data during x time slots without interfering with primary users. The key novelty of this paper is giving priority for SUs with important data traffic and which frequently solicits data channels to transmit for the remaining time of the ongoing time slot and for the next time slots directly after checking the channel availability. We develop a new analytical model to evaluate delay parameter for two scenarios with and without resource reservation and we then investigate the impact of those scenarios on the energy consumption. We show through simulations that cognitive radio performances increase noticeably with the proposed scheme. Copyright {\textcopyright} 2014 John Wiley \& Sons, Ltd.}, doi = {10.1002/dac.2764}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84925355761\&doi=10.1002\%2fdac.2764\&partnerID=40\&md5=ecf737b718373c4b7731373c0afd1200}, author = {Elmachkour, M.a and Kobbane, A.a c and Sabir, E.b and Ben-Othman, J.c and El Koutbi, M.a} } @conference { ISI:000380431500113, title = {A decision tree-based approach for cardiovascular dysautonomias diagnosis}, booktitle = {2015 IEEE SYMPOSIUM SERIES ON COMPUTATIONAL INTELLIGENCE (IEEE SSCI)}, year = {2015}, note = {IEEE Symposium Series Computational Intelligence, Cape Town, SOUTH AFRICA, DEC 07-10, 2015}, pages = {816-823}, publisher = {IEEE; IEEE Computational Intelligence Soc; IEEE BigData}, organization = {IEEE; IEEE Computational Intelligence Soc; IEEE BigData}, abstract = {Terms as knowledge Discovery from Databases (KDD), Data Mining (DM) and Machine Learning (ML), gain from day to day, an increasing significance in medical data analysis. They permit the identification, evaluation, and quantification of some less visible, intuitively unpredictable, by using generally large sets of data. Researchers have long been concerned with applying DM tools to improve data analysis on large data sets. DM has been increasingly used in medicine, particularly in cardiology. In fact, data mining applications can greatly benefits all parts involved in cardiology. Autonomic nervous system (ANS) is the part of the nervous system that is involved in homeostasis of the whole body functions. A malfunction in this system can lead to a cardiovascular dysautonomias. 
Thereby, a set of dynamic tests are adopted in ANS units to diagnose and treat patients with cardiovascular dysautonomias. In this paper, a case study was performed in order to construct a cardiovascular dysautonomias prediction system using data mining techniques and a dataset collected from an ANS unit of the Moroccan university hospital Avicenne. The prediction system is a decision tree-based classifier that was developed using C4.5 decision tree algorithm to automate the analysis procedure of ANS{\textquoteright}s test results and make it easier for specialists. The performance of the generated decision trees was evaluated and the results obtained achieved high accuracy rates which were very promising. In addition, a clinical validation of the developed system was carried out on new patients. In fact, a prototype of the developed system was implemented on JEE platform and deployed in the ANS unit so as to be validated clinically. The results were analyzed and thus the prototype was approved to be highly accurate, interpretable, time saving and easy to use.}, isbn = {978-1-4799-7560-0}, doi = {10.1109/SSCI.2015.121}, author = {Kadi, Ilham and Idri, Ali} } @conference {Kadi2015816, title = {A decision tree-based approach for cardiovascular dysautonomias diagnosis: A case study}, booktitle = {Proceedings - 2015 IEEE Symposium Series on Computational Intelligence, SSCI 2015}, year = {2015}, note = {cited By 1}, pages = {816-823}, abstract = {Terms as knowledge Discovery from Databases (KDD), Data Mining (DM) and Machine Learning (ML), gain from day to day, an increasing significance in medical data analysis. They permit the identification, evaluation, and quantification of some less visible, intuitively unpredictable, by using generally large sets of data. Researchers have long been concerned with applying DM tools to improve data analysis on large data sets. DM has been increasingly used in medicine, particularly in cardiology. In fact, data mining applications can greatly benefits all parts involved in cardiology. Autonomic nervous system (ANS) is the part of the nervous system that is involved in homeostasis of the whole body functions. A malfunction in this system can lead to a cardiovascular dysautonomias. Thereby, a set of dynamic tests are adopted in ANS units to diagnose and treat patients with cardiovascular dysautonomias. In this paper, a case study was performed in order to construct a cardiovascular dysautonomias prediction system using data mining techniques and a dataset collected from an ANS unit of the Moroccan university hospital Avicenne. The prediction system is a decision tree-based classifier that was developed using C4.5 decision tree algorithm to automate the analysis procedure of ANS{\textquoteright}s test results and make it easier for specialists. The performance of the generated decision trees was evaluated and the results obtained achieved high accuracy rates which were very promising. In addition, a clinical validation of the developed system was carried out on new patients. In fact, a prototype of the developed system was implemented on JEE platform and deployed in the ANS unit so as to be validated clinically. The results were analyzed and thus the prototype was approved to be highly accurate, interpretable, time saving and easy to use. 
{\textcopyright} 2015 IEEE.}, doi = {10.1109/SSCI.2015.121}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964940639\&doi=10.1109\%2fSSCI.2015.121\&partnerID=40\&md5=f0d227a9f5e18f21b2c5aebad30b41f8}, author = {Kadi, I. and Idri, A.} } @article {Saissi20151044, title = {Deep web integration: The tip of the iceberg}, journal = {International Review on Computers and Software}, volume = {10}, number = {10}, year = {2015}, note = {cited By 2}, pages = {1044-1053}, abstract = {The web is divided in two parts, a part that search engines can access and which is called the surface web, and an inaccessible part called the deep web. The deep web is much bigger and richer in information than the surface web, and its web sources are only accessible through the associated Html forms. Our aim in this paper is to present our automatic approach to extract a relational schema describing a selected deep web source. This relational schema can be used by a virtual integration system to access the associated deep web source. Our approach is based on a static and dynamic analysis of the Html forms giving access to the selected deep web source. Our approach process uses two external knowledge databases: The first one is our proprietary knowledge database about the deep web domains called the Identification Tables and the second one is an external ontology. All the information extracted by our approach from and through the associated Html forms are used subsequently to build our final relational schema describing the associated deep web source. {\textcopyright} 2015 Praise Worthy Prize S.r.l. - All rights reserved.}, doi = {10.15866/irecos.v10i10.7755}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959137103\&doi=10.15866\%2firecos.v10i10.7755\&partnerID=40\&md5=d936ba2cd781f89fb3b9bb571b5e02e3}, author = {Saissi, Y.a and Zellou, A.a b and Idri, A.a c} } @conference {Zakriti2015, title = {Design of a compact CPW ultra-wideband antenna}, booktitle = {Mediterranean Microwave Symposium}, volume = {2015-April}, year = {2015}, note = {cited By 0}, abstract = {In this paper, a design and analysis of compact coplanar waveguide-fed ultra wideband antenna is presented. The antenna design exhibits a very wide operating bandwidth of 14.3GHz with a return loss better than 10dB in the frequency range from 3.7GHz to 18GHz. The gain of the proposed antenna is around 4.8dBi. This antenna configuration would be useful for UWB indoor application as it is easy to fabricate and integrate with RF circuitry. All simulations in this work were carried out by using the electromagnetic software. {\textcopyright} 2014 IEEE.}, doi = {10.1109/MMS.2014.7088987}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84938296316\&doi=10.1109\%2fMMS.2014.7088987\&partnerID=40\&md5=a670f55ec4814a4d59bcc99a621653f0}, author = {Zakriti, A.a and Essaaidi, M.b and Touhami, N.A.c} } @article {Naghar20151813, title = {Design of compact multiband bandpass filter with suppression of second harmonic spurious by coupling gap reduction}, journal = {Journal of Electromagnetic Waves and Applications}, volume = {29}, number = {14}, year = {2015}, note = {cited By 1}, pages = {1813-1828}, abstract = {In this paper, we describe a method to implement compact multiband bandpass filters with suppression of second harmonic frequency. 
This filter design approach is based on decreasing the coupling gap between adjacent resonators of a parallel-coupled-line bandpass filter in order to achieve both the desired multiband frequency response and the spurious suppression. We present the theoretical analysis of the proposed structure that consists of modeling the frequency dependence of the even- and odd-mode characteristic impedances as well as due to the different phase velocities of the parallel-coupled microstrip lines. As an example, a compact tri-band parallel-coupled-line bandpass filter with suppression of second harmonic frequency was implemented operating at 1.9/3.2/4.6 GHz to cover PCS1900, WiMAX, and C-band applications. A three-pole Chebyshev parallel-coupled microstrip bandpass filter was designed at a center frequency of 3.2 GHz and used as the basis to validate the gapping effect on the filter response which also achieves a narrower bandwidth for the second harmonic. Finally, the filter performance with minimized coupling gap is compared to a filter enhanced by the insertion of apertures in the ground plane. Generally speaking, good agreement was accomplished between simulated, calculated, and measured results. {\textcopyright} 2015 Taylor \& Francis.}, doi = {10.1080/09205071.2015.1043029}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84940714115\&doi=10.1080\%2f09205071.2015.1043029\&partnerID=40\&md5=b402327d69e8f5f98418025a5c5cd607}, author = {Naghar, A.a and Aghzout, O.a and Alejos, A.V.b and Sanchez, M.G.b and Essaaidi, M.c} } @article { ISI:000360305800001, title = {Design of compact multiband bandpass filter with suppression of second harmonic spurious by coupling gap reduction}, journal = {JOURNAL OF ELECTROMAGNETIC WAVES AND APPLICATIONS}, volume = {29}, number = {14}, year = {2015}, month = {SEP 22}, pages = {1813-1828}, abstract = {In this paper, we describe a method to implement compact multiband bandpass filters with suppression of second harmonic frequency. This filter design approach is based on decreasing the coupling gap between adjacent resonators of a parallel-coupled-line bandpass filter in order to achieve both the desired multiband frequency response and the spurious suppression. We present the theoretical analysis of the proposed structure that consists of modeling the frequency dependence of the even- and odd-mode characteristic impedances as well as due to the different phase velocities of the parallel-coupled microstrip lines. As an example, a compact tri-band parallel-coupled-line bandpass filter with suppression of second harmonic frequency was implemented operating at 1.9/3.2/4.6GHz to cover PCS1900, WiMAX, and C-band applications. A three-pole Chebyshev parallel-coupled microstrip bandpass filter was designed at a center frequency of 3.2GHz and used as the basis to validate the gapping effect on the filter response which also achieves a narrower bandwidth for the second harmonic. Finally, the filter performance with minimized coupling gap is compared to a filter enhanced by the insertion of apertures in the ground plane. 
Generally speaking, good agreement was accomplished between simulated, calculated, and measured results.}, issn = {0920-5071}, doi = {10.1080/09205071.2015.1043029}, author = {Naghar, Azzedin and Aghzout, Otman and Vazquez Alejos, Ana and Garcia Sanchez, Manuel and Essaaidi, Mohammed} } @article {Naghar20151786, title = {Design of compact wideband multi-band and ultrawideband band pass filters based on coupled half wave resonators with reduced coupling gap}, journal = {IET Microwaves, Antennas and Propagation}, volume = {9}, number = {15}, year = {2015}, note = {cited By 0}, pages = {1786-1792}, abstract = {In this paper we propose a technique to design compact multi-band and UWB bandpass filters based on coupled half wave resonators. The proposed design consists of the modification of a conventional parallel coupled Chebyshev bandpass filter structure by setting a very small or null coupling gap between the resonators of the center sections jointly with a very small spacing between resonators of the extremity sections. This spacing determines the performances of selected frequency bands. An ultrawideband response is accomplished by applying null spacing between all the adjacent resonators. We analysed the effect of the separation distance between the coupled lines on both the fractional bandwidth and group velocity of the filter response. The effect of the order assumed for the initial Chebyshev filter was also discussed. As an illustration of the proposed technique, we designed and measured a dual band and a tri-band filter for the frequencies covering the WiMAX/WLAN/X system bands demonstrating an excellent performance, with a fractional bandwidth covering the 40\% and 100\% of the FCC bandwidth respectively. The proposed technique alleviates the fabrication accuracy requirements. The designs show an optimal improvement in terms of group velocity flatness.}, doi = {10.1049/iet-map.2015.0188}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84949946910\&doi=10.1049\%2fiet-map.2015.0188\&partnerID=40\&md5=ae56641eed11a3a3b77fe146406fba57}, author = {Naghar, A.a b and Aghzout, O.c and Alejos, A.V.a and Sanchez, M.G.a and Essaaidi, M.d} } @article { ISI:000366391200018, title = {Design of compact wideband multi-band and ultrawideband band pass filters based on coupled half wave resonators with reduced coupling gap}, journal = {IET MICROWAVES ANTENNAS \& PROPAGATION}, volume = {9}, number = {15}, year = {2015}, month = {DEC 10}, pages = {1786-1792}, abstract = {In this paper we propose a technique to design compact multi-band and UWB bandpass filters based on coupled half wave resonators. The proposed design consists of the modification of a conventional parallel coupled Chebyshev bandpass filter structure by setting a very small or null coupling gap between the resonators of the center sections jointly with a very small spacing between resonators of the extremity sections. This spacing determines the performances of selected frequency bands. An ultrawideband response is accomplished by applying null spacing between all the adjacent resonators. We analysed the effect of the separation distance between the coupled lines on both the fractional bandwidth and group velocity of the filter response. The effect of the order assumed for the initial Chebyshev filter was also discussed. 
As an illustration of the proposed technique, we designed and measured a dual band and a tri-band filter for the frequencies covering the WiMAX/WLAN/X system bands demonstrating an excellent performance, with a fractional bandwidth covering the 40\% and 100\% of the FCC bandwidth respectively. The proposed technique alleviates the fabrication accuracy requirements. The designs show an optimal improvement in terms of group velocity flatness.}, issn = {1751-8725}, doi = {10.1049/iet-map.2015.0188}, author = {Naghar, Azzedin and Aghzout, Otman and Vazquez Alejos, Ana and Garcia Sanchez, Manuel and Essaaidi, Mohamed} } @article {11164386420151201, title = {Design of compact wideband multi-band and ultrawideband band pass filters based on coupled half wave resonators with reduced coupling gap.}, journal = {IET Microwaves, Antennas \& Propagation}, volume = {9}, number = {15}, year = {2015}, pages = {1786 - 1792}, abstract = {In this paper we propose a technique to design compact multi-band and UWB bandpass filters based on coupled half wave resonators. The proposed design consists of the modification of a conventional parallel coupled Chebyshev bandpass filter structure by setting a very small or null coupling gap between the resonators of the center sections jointly with a very small spacing between resonators of the extremity sections. This spacing determines the performances of selected frequency bands. An ultrawideband response is accomplished by applying null spacing between all the adjacent resonators. We analysed the effect of the separation distance between the coupled lines on both the fractional bandwidth and group velocity of the filter response. The effect of the order assumed for the initial Chebyshev filter was also discussed. As an illustration of the proposed technique, we designed and measured a dual band and a tri-band filter for the frequencies covering the WiMAX/WLAN/X system bands demonstrating an excellent performance, with a fractional bandwidth covering the 40\% and 100\% of the FCC bandwidth respectively. The proposed technique alleviates the fabrication accuracy requirements. The designs show an optimal improvement in terms of group velocity flatness.}, keywords = {Bandpass filters {\textendash} Research, Chebyshev systems {\textendash} Research, Electric resonators {\textendash} Research, Ultra-wideband antennas {\textendash} Research, Wireless LANs {\textendash} Research}, issn = {17518725}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=iih\&AN=111643864\&site=ehost-live}, author = {Naghar, Azzedin and Aghzout, Otman and Vazquez Alejos, Ana and Garcia Sanchez, Manuel and Essaaidi, Mohamed} } @conference { ISI:000380570700061, title = {Designing ReDy Distributed Systems}, booktitle = {2015 IEEE INTERNATIONAL CONFERENCE ON AUTONOMIC COMPUTING}, year = {2015}, note = {IEEE International Conference on Autonomic Computing, Grenoble, FRANCE, JUL 07-10, 2015}, pages = {331-336}, publisher = {IEEE; usenix; Univ Joseph Fourier; Telecom ParisTech; Spec; GRENOBLE ALPLES METROPOLE; VILLE CRENOBOLE; MINALOGIC; CPS; IEEE COMPUTER SOC; hp; Google; orange; FOCAS; INRIA INVENTORS DIGITAL WORLD}, organization = {IEEE; usenix; Univ Joseph Fourier; Telecom ParisTech; Spec; GRENOBLE ALPLES METROPOLE; VILLE CRENOBOLE; MINALOGIC; CPS; IEEE COMPUTER SOC; hp; Google; orange; FOCAS; INRIA INVENTORS DIGITAL WORLD}, abstract = {Distributed systems are largely present and deployed in recent applications. Several systems have common basic requirements, which motivates to adapt reusable solutions for each family of systems. In this paper, we focus on distributed systems designed for large-scale applications requiring a high degree of Reliability and Dynamicity (ReDy distributed systems). 
We propose a basic architecture for this family of systems and a design solution to guarantee the scalability of the system, the fault tolerance, and a highly dynamic membership management. The studied systems range from hybrid architecture, on which we combine centralized and decentralized solutions.}, isbn = {978-1-4673-6971-8}, doi = {10.1109/ICAC.2015.63}, author = {Hafdi, Kaoutar and Kriouile, Abdelaziz}, editor = {Lalanda, P and Diaconescu, A and Cherkasova, L and Kounev, S} } @conference {Hafdi2015331, title = {Designing ReDy distributed systems}, booktitle = {Proceedings - IEEE International Conference on Autonomic Computing, ICAC 2015}, year = {2015}, note = {cited By 0}, pages = {331-336}, abstract = {Distributed systems are largely present and deployed in recent applications. Several systems have common basic requirements, which motivates to adapt reusable solutions for each family of systems. In this paper, we focus on distributed systems designed for large-scale applications requiring a high degree of Reliability and Dynamicity (ReDy distributed systems). We propose a basic architecture for this family of systems and a design solution to guarantee the scalability of the system, the fault tolerance, and a highly dynamic membership management. The studied systems range from hybrid architecture, on which we combine centralized and decentralized solutions. {\textcopyright} 2015 IEEE.}, doi = {10.1109/ICAC.2015.63}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84961839190\&doi=10.1109\%2fICAC.2015.63\&partnerID=40\&md5=f4847babbee4e85fc1f3d4aa04d701f1}, author = {Hafdi, K. and Kriouile, A.} } @conference { ISI:000380403000031, title = {Detecting Feature Duplication in Natural Language Specifications when Evolving Software Product Lines}, booktitle = {ENASE 2015 - PROCEEDINGS OF THE 10TH INTERNATIONAL CONFERENCE ON EVALUATION OF NOVEL APPROACHES TO SOFTWARE ENGINEERING}, year = {2015}, note = {10th International Conference on Evaluation of Novel Approaches to Software Engineering, Barcelona, SPAIN, APR 29-30, 2015}, pages = {257-262}, publisher = {Inst Syst \& Technol Information, Control \& Commun; Tech Council Software Engn; IEEE Comp Soc}, organization = {Inst Syst \& Technol Information, Control \& Commun; Tech Council Software Engn; IEEE Comp Soc}, abstract = {Software product lines are dynamic systems that need to evolve continuously to meet new customer requirements. This evolution impacts both the core platform of the product line and its derived products. For several reasons, the most common way to express requirements by customers is natural language. However, the experience has shown that this communication channel does not give the possibility to detect system defects such as inconsistency and duplication. The objective of this paper is to propose a method to transform textual requirements into the XML format used by some Feature-oriented software development tools, in order to facilitate the detection of features duplication.}, isbn = {978-9-8975-8143-4}, author = {Khtira, Amal and Benlarabi, Anissa and El Asri, Bouchra}, editor = {Filipe, J and Maciaszek, L} } @article {Elmagrouni2015773, title = {A development process for adaptable services-oriented systems}, journal = {International Review on Computers and Software}, volume = {10}, number = {7}, year = {2015}, note = {cited By 1}, pages = {773-783}, abstract = {In this work, we present our approach for the Development of adaptable Servicesoriented Systems. 
The process consists of four phases (Preparation, Elaboration, Construction and Transition). It is based on the Rational Unified Process (RUP) and component-oriented development process modelling (BPM: Business Process Modelling). We discuss the shortcomings of current solutions for adaptive service-oriented System. To address those shortcomings, we introduce techniques that can be used to build and evolve proactive Services-oriented Systems. The use of those techniques in an integrated way is described along the phases of the service lifecycle. A lightweight WSDL extension of the standard WSDL (Web Service Description Language) is used to describe the service functionalities. The extension is carried out as a metamodel transformation, according to principles and standards recommended by the Model Driven Architecture (MDA). {\textcopyright} 2015 Praise Worthy Prize S.r.l. - All rights reserved.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84943801868\&partnerID=40\&md5=b46057e08465b42d2d72b19a6ed13121}, author = {Elmagrouni, I.a and Kenzi, A.b and Lethrech, M.a and Kriouile, A.a} } @article {Allouch201531, title = {Distributed CloudIMS: Future-Generation Network with Internet of Thing Based on Distributed Cloud Computing}, journal = {Advances in Intelligent Systems and Computing}, volume = {308 AISC}, number = {VOLUME 1}, year = {2015}, note = {cited By 0}, pages = {31-45}, abstract = {The next-generation network, cloud computing, and Internet of thing are a challenging and promising paradigm shift in IT world technology. Diminishing the cost for users for provisioning anywhere connecting at anytime from anywhere network, CloudIMS consists of interconnecting heterogeneous access technology and to respond to a major challenge for serving the increase in demand and scalable network access to share pool of configurable resource of enabling a convenient cloud computing. This paper mainly focused on common approach to integrate the IP multimedia subsystem (IMS), the Internet of thing, and cloud computing under the name of CloudIMS architecture which makes multimedia service easy to deploy on a cloud platform. We present the state of art of the different elements of CloudIMS. Moreover, we examine the layers designed for CloudIMS based on next-generation network access for mobile communication devices between different types of technologies (3GPP and non-3GPP), such as global system for mobile communication (GSM), wireless network, worldwide interoperability for microwave access (WiMAX), Universal Mobile Telecommunications System (UMTS) and long-term evolution (LTE). Finally, we present an architecture of CloudIMS according to our point of view, followed by a discussion of a use case for the future networks. {\textcopyright} Springer India 2015.}, doi = {10.1007/978-81-322-2012-1_5}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906933110\&doi=10.1007\%2f978-81-322-2012-1_5\&partnerID=40\&md5=d708396f65127dc5fbaa3123c7e2b9aa}, author = {Allouch, H. and Belkasmi, M.} } @conference {BenChekroun2015320, title = {A distributed open-close access for Small-Cell networks: A random matrix game analysis}, booktitle = {IWCMC 2015 - 11th International Wireless Communications and Mobile Computing Conference}, year = {2015}, note = {cited By 0}, pages = {320-325}, abstract = {Nowadays, Small-Cells are widely being deployed to assist and improve performance of mobile networks. Indeed, they are a promising solution to improve coverage and to offload data traffic in mobile networks. 
In this paper, we propose a signaling-less architecture of the heterogeneous network composed of one single Macro Base Station and a Single Small-Cell. First, we construct a game theoretic framework for channel-state independent interaction. We present many conditions for the existence of Pure Nash equilibrium. Next, and in order to capture the continuous change of the channel state, we build a random matrix game where the channel state is considered to be random (potentially ruled by some given distribution). A characterization of Nash equilibrium is provided in terms of pure strategies and mixed strategies. Convergence to Nash equilibrium is furthermore guaranteed using a variant of the well-known Combined fully distributed payoff and strategy learning. Our algorithm converges faster (only 10-20 iterations are required to converge to Nash equilibrium) and only need a limited amount of local information. This is quite promising since it says that our scheme is almost applicable for all environments (fast fading included). {\textcopyright} 2015 IEEE.}, doi = {10.1109/IWCMC.2015.7289103}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84949516312\&doi=10.1109\%2fIWCMC.2015.7289103\&partnerID=40\&md5=09ea6932ff0311a5cea7e0dc62597821}, author = {Ben Chekroun, S.a and Sabir, E.b and Kobbane, A.c and Tembine, H.d and Bouyakhf, E.-H.a and Ibrahimi, K.e} } @conference { ISI:000381477900056, title = {A Distributed Open-Close Access for Small-Cell Networks: A Random Matrix Game Analysis}, booktitle = {2015 INTERNATIONAL WIRELESS COMMUNICATIONS \& MOBILE COMPUTING CONFERENCE (IWCMC)}, series = {International Wireless Communications and Mobile Computing Conference}, year = {2015}, note = {11th IEEE International Wireless Communications and Mobile Computing Conference (IEEE IWCMC), Dubrovnik, CROATIA, AUG 24-25, 2015}, pages = {320-325}, publisher = {IEEE; IEEE Croatia Sect; Univ Dubrovnik}, organization = {IEEE; IEEE Croatia Sect; Univ Dubrovnik}, abstract = {Nowadays, Small-Cells are widely being deployed to assist and improve performance of mobile networks. Indeed, they are a promising solution to improve coverage and to offload data traffic in mobile networks. In this paper, we propose a signaling-less architecture of the heterogeneous network composed of one single Macro Base Station and a Single Small-Cell. First, we construct a game theoretic framework for channel-state independent interaction. We present many conditions for the existence of Pure Nash equilibrium. Next, and in order to capture the continuous change of the channel state, we build a random matrix game where the channel state is considered to be random (potentially ruled by some given distribution). A characterization of Nash equilibrium is provided in terms of pure strategies and mixed strategies. Convergence to Nash equilibrium is furthermore guaranteed using a variant of the well-known Combined fully distributed payoff and strategy learning. Our algorithm converges faster (only 10-20 iterations are required to converge to Nash equilibrium) and only need a limited amount of local information. 
This is quite promising since it says that our scheme is almost applicable for all environments (fast fading included).}, isbn = {978-1-4799-5344-8}, issn = {2376-6492}, author = {Ben Chekroun, Samia and Sabir, Essaid and Kobbane, Abdellatif and Tembine, Hamidou and Bouyakhf, El-Houssine and Ibrahimi, Khalil} } @article {Benkaouz201594, title = {Distributed privacy-preserving data aggregation via anonymization}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {9466}, year = {2015}, note = {cited By 0}, pages = {94-108}, abstract = {Data aggregation is a key element in many applications that draw insights from data analytics, such as medical research, smart metering, recommendation systems and real-time marketing. In general, data is gathered from several sources, processed, and publicly released for data analysis. Since the considered data might contain personal and sensitive information, special handling of private data is required. In this paper, we present a novel distributed privacy-preserving data aggregation protocol, called ADiPA. It relies on anonymization techniques for protecting personal data, such as k-anonymity, l-diversity and t-closeness. Its purpose is to allow a set of entities to derive aggregate results from data tables that are partitioned across these entities in a fully decentralized manner while preserving the privacy of their individual sensitive inputs. ADiPA neither relies on a trusted third party nor on cryptographic techniques. The protocol performs accurate aggregation when communication links and nodes do not fail. {\textcopyright} Springer International Publishing Switzerland 2015.}, doi = {10.1007/978-3-319-26850-7_7}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84961141090\&doi=10.1007\%2f978-3-319-26850-7_7\&partnerID=40\&md5=518bf129894b27ee29b635e53f9baf0c}, author = {Benkaouz, Y.a and Erradi, M.a and Freisleben, B.b} } @article {10895165620150901, title = {A distributed protocol for privacy preserving aggregation with non-permanent participants.}, journal = {Computing}, volume = {97}, number = {9}, year = {2015}, pages = {893 - 912}, abstract = {Recent advances in techniques that combine and analyze data collected from multiple partners led to many new promising distributed collaborative applications. Such collaborative computations could occur between trusted partners, between partially trusted partners, or between competitors. Therefore preserving privacy is an important issue in this context. This paper presents a distributed protocol for privacy-preserving aggregation to enable computing a class of aggregation functions that can be expressed as Abelian group. The proposed protocol is based on an overlay structure that enables secret sharing without the need of any central authority or heavyweight cryptography. It preserves data privacy such that participant data is only known to their owner with a given probability. The aggregation result is computed by participants themselves without interacting with a specific aggregator. The aggregation result is accurate when there is no data loss. 
A strategy to handle the problem of nodes failures is given, along with a study of the privacy ensured by the suggested protocol.}, keywords = {68M14 Distributed systems, 68W15 Distributed algorithms, Acquisition of data, Aggregation protocol, Computer simulation, Distributed polling application, Privacy, Probability theory, Security, Subroutines (Computer programs)}, issn = {0010485X}, url = {http://search.ebscohost.com/login.aspx?direct=true\&db=bth\&AN=108951656\&site=ehost-live}, author = {Benkaouz, Yahya and Erradi, Mohammed} } @article {Benkaouz2015893, title = {A distributed protocol for privacy preserving aggregation with non-permanent participants}, journal = {Computing}, volume = {97}, number = {9}, year = {2015}, note = {cited By 0}, pages = {893-912}, abstract = {Recent advances in techniques that combine and analyze data collected from multiple partners led to many new promising distributed collaborative applications. Such collaborative computations could occur between trusted partners, between partially trusted partners, or between competitors. Therefore preserving privacy is an important issue in this context. This paper presents a distributed protocol for privacy-preserving aggregation to enable computing a class of aggregation functions that can be expressed as Abelian group. The proposed protocol is based on an overlay structure that enables secret sharing without the need of any central authority or heavyweight cryptography. It preserves data privacy such that participant data is only known to their owner with a given probability. The aggregation result is computed by participants themselves without interacting with a specific aggregator. The aggregation result is accurate when there is no data loss. A strategy to handle the problem of nodes failures is given, along with a study of the privacy ensured by the suggested protocol. {\textcopyright} 2013, Springer-Verlag Wien.}, doi = {10.1007/s00607-013-0373-6}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84939469312\&doi=10.1007\%2fs00607-013-0373-6\&partnerID=40\&md5=a64ee416ee9b81d024e5b0975f2f32f0}, author = {Benkaouz, Y. and Erradi, M.} } @article { ISI:000359820400003, title = {A distributed protocol for privacy preserving aggregation with non-permanent participants}, journal = {COMPUTING}, volume = {97}, number = {9, SI}, year = {2015}, note = {International Conference on Network Systems (NETYS 2013), Marrakech, MOROCCO, MAY 02-04, 2013}, month = {SEP}, pages = {893-912}, abstract = {Recent advances in techniques that combine and analyze data collected from multiple partners led to many new promising distributed collaborative applications. Such collaborative computations could occur between trusted partners, between partially trusted partners, or between competitors. Therefore preserving privacy is an important issue in this context. This paper presents a distributed protocol for privacy-preserving aggregation to enable computing a class of aggregation functions that can be expressed as Abelian group. The proposed protocol is based on an overlay structure that enables secret sharing without the need of any central authority or heavyweight cryptography. It preserves data privacy such that participant data is only known to their owner with a given probability. The aggregation result is computed by participants themselves without interacting with a specific aggregator. The aggregation result is accurate when there is no data loss. 
A strategy to handle the problem of nodes failures is given, along with a study of the privacy ensured by the suggested protocol.}, issn = {0010-485X}, doi = {10.1007/s00607-013-0373-6}, author = {Benkaouz, Yahya and Erradi, Mohammed} } @article {Khtira2015592, title = {Duplication detection when evolving feature models of software product lines}, journal = {Information (Switzerland)}, volume = {6}, number = {4}, year = {2015}, note = {cited By 1}, pages = {592-612}, abstract = {After the derivation of specific applications from a software product line, the applications keep evolving with respect to new customer{\textquoteright}s requirements. In general, evolutions in most industrial projects are expressed using natural language, because it is the easiest and the most flexible way for customers to express their needs. However, the use of this means of communication has shown its limits in detecting defects, such as inconsistency and duplication, when evolving the existing models of the software product line. The aim of this paper is to transform the natural language specifications of new evolutions into a more formal representation using natural language processing. Then, an algorithm is proposed to automatically detect duplication between these specifications and the existing product line feature models. In order to instantiate the proposed solution, a tool is developed to automatize the two operations. {\textcopyright} 2015 by the authors.}, doi = {10.3390/info6040592}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84952314845\&doi=10.3390\%2finfo6040592\&partnerID=40\&md5=0661de379421d5877cc91927d0c41731}, author = {Khtira, A. and Benlarabi, A. and El Asri, B.} } @conference {Baya2015439, title = {Dynamic large scale product lines through modularization approach}, booktitle = {ICEIS 2015 - 17th International Conference on Enterprise Information Systems, Proceedings}, volume = {2}, year = {2015}, note = {cited By 0}, pages = {439-444}, abstract = {Software product line (SPL) now faces major scalability problems because of technical advances of the past decades. However, using traditional approaches of software engineering to deal with this increasing scalability is not feasible. Therefore, new techniques must be provided in order to resolve scalability issues. For such a purpose, we propose through this paper a modularization approach according to two dimensions: In the first dimension we use Island algorithm in order to obtain structural modules. In the second dimension we decompose obtained modules according to features binding time so as to obtain dynamic submodules. Copyright {\textcopyright} 2015 SCITEPRESS - Science and Technology Publications.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84939529035\&partnerID=40\&md5=1e1405f89ebaaff933ee12e07dcfc995}, author = {Baya, A. and El Asri, B. and Dehmouch, I. and Mcharfi, Z.} } @article {Aouinatou20158, title = {A dynamic study with side channel against an identification based encryption}, journal = {International Journal of Communication Networks and Information Security}, volume = {7}, number = {1}, year = {2015}, note = {cited By 1}, pages = {8-19}, abstract = {Recently, the side channel keeps the attention of researchers in theory of pairing, since, several studies have been done in this subject and all they have the aim in order to attack the cryptosystems of Identification Based Encryption (IBE) which are integrated into Smart Cards (more than 80\% of those cryptosystems are based on a pairing). 
The great success and the remarkable development of the cryptography IBE in the recent years and the direct connection of this success to the ability of resistance against any kind of attack, especially the DPA (Differential Power Analysis) and DFA (Differential Fault Analysis) attacks, leave us to browse saying all the studies of the DPA and DFA attacks applied to a pairing and we have observed that they have no great effect to attack the cryptosystems of IBE. That is what we will see in this paper. In this work we will illuminate the effect of the DPA attack on a cryptosystems of IBE and we would see on what level we can arrive. Thus in the case where this attack can influence on those cryptosystems, we can present an appropriate counter-measures to resist such attack. In the other part, we will also propose a convenient counter-measure to defend against the DFA attack when the embedding degree is even.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929307912\&partnerID=40\&md5=01269c358b3d93b26c9cc87bd33fc777}, author = {Aouinatou, R.a and Belkasmi, M.b and Askali, M.b} } @conference {Walid20142817, title = {A decentralized network selection algorithm for group vertical handover in heterogeneous networks}, booktitle = {IEEE Wireless Communications and Networking Conference, WCNC}, year = {2014}, note = {cited By 5}, pages = {2817-2821}, abstract = {The traditional vertical handover schemes postulate that vertical handover of each user comes on an individual basis. This enables the users to know previously the decision already made by other users, and then the choice will be made accordingly. However, in the case of a group vertical handover, almost all the VHO decisions - which will certainly choose the best network, will be made at the same time which will lead to system performance degradation or network congestion. In this paper, we propose a totally decentralized algorithm for network selection which based on the Congestion Game to resolve the problem of network congestion in GVHO. Therefore, the proposed algorithm named Fully Decentralized Nash Learning Algorithm with incomplete information is a prediction done by each mobile in the group that helps them to reach the Nash equilibrium. Simulation results validate the algorithm and show its robustness under two scenarios. In the first one, we examine the algorithm with a fixed number of mobiles in group to evaluate the mixed strategy and the average perceived throughput of mobiles in WIMAX and HSDPA on the basis of iteration. In the second one, we examine the algorithm with different number of mobiles in group for testing the average number of iterations needed to reach the Nash equilibrium. We also compare it with the traditional vertical handover algorithm. 
{\textcopyright} 2014 IEEE.}, doi = {10.1109/WCNC.2014.6952895}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84912123155\&doi=10.1109\%2fWCNC.2014.6952895\&partnerID=40\&md5=35a46d7aee23563018fc6f10fc8506ab}, author = {Walid, A.a and El Kamili, M.b and Kobbane, A.a and Mabrouk, A.a and Sabir, E.c and El Koutbi, M.a} } @conference {Romadi2014, title = {Detection and recognition of road signs in a video stream based on the shape of the panels}, booktitle = {2014 9th International Conference on Intelligent Systems: Theories and Applications, SITA 2014}, year = {2014}, note = {cited By 0}, abstract = {In this paper, we present a robust approach of automatic detection and recognition of road signs in national roads, starting from the images resulting from a video stream taken by a camera embarked on a vehicle. Our approach is composed of three main phases: the first phase is to extract video stream images containing a circle or a triangle. This extraction is performed respectively by Hough transformation and Ramer-Douglas-Peucker filter, the second phase consists of extraction areas of the calculated image, in the previous phase. In the third and last phase, we proceed to a matching of the extracted image areas with signs of reference by comparison of interest points extracted by the SURF method and the matching method FLANN. {\textcopyright} 2014 IEEE.}, doi = {10.1109/SITA.2014.6847285}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84904598231\&doi=10.1109\%2fSITA.2014.6847285\&partnerID=40\&md5=62713d5791e141de32775236f194bb72}, author = {Romadi, M. and Oulah Haj Thami, R. and Romadi, R. and Chiheb, R.} } @conference { ISI:000346142800010, title = {Detection and recognition of road signs in a video stream based on the shape of the panels.}, booktitle = {2014 9TH INTERNATIONAL CONFERENCE ON INTELLIGENT SYSTEMS: THEORIES AND APPLICATIONS (SITA{\textquoteright}14)}, year = {2014}, note = {9th International Conference on Intelligent Systems - Theories and Applications (SITA), Inst Natl Postes \& Telecommunicat, Rabat, MOROCCO, MAY 07-08, 2014}, publisher = {IEEE; IEEE Morocco}, organization = {IEEE; IEEE Morocco}, abstract = {In this paper, we present a robust approach of automatic detection and recognition of road signs in national roads, starting from the images resulting from a video stream taken by a camera embarked on a vehicle. Our approach is composed of three main phases: the first phase is to extract video stream images containing a circle or a triangle. This extraction is performed respectively by Hough transformation and Ramer-Douglas-Peucker filter, the second phase consists of extraction areas of the calculated image, in the previous phase. In the third and last phase, we proceed to a matching of the extracted image areas with signs of reference by comparison of interest points extracted by the SURF method and the matching method FLANN.}, isbn = {978-1-4799-3566-6}, author = {Romadi, Mohammed and Oulahhajthami, Rachid and Romadi, Rahal and Chiheb, Raddouane} } @conference {Naghar20142018, title = {Development of a calculator for Edge and Parallel Coupled Microstrip band pass filters}, booktitle = {IEEE Antennas and Propagation Society, AP-S International Symposium (Digest)}, year = {2014}, note = {cited By 3}, pages = {2018-2019}, abstract = {This paper presents an implemented calculator tool for the design of Edge/ Parallel Coupled Microstrip Band Pass Filters (PCMBPF) that makes use of the MATLAB software. 
This calculator allows estimating both the parameters required for the design of the PCMBPF and the electrical response which is obtained by means of the equivalent circuit of this type of filters. Based on the transmission line theory approach (TLTA), the calculator herein proposed is a good solution to simply obtain the design parameters of this type of filters given that all formulas required for the PCMBPF design are programmed using close-form mathematic expressions and the coupling matrix concept. In order to validate our calculator performance, we implemented the proposed filter in a commercial electromagnetic simulator CST MWs that considers a set of electromagnetic effects, and accurately determine the final filter design. Secondly, we compared these simulation outcomes with the measurement results, achieving a reasonable agreement. {\textcopyright} 2014 IEEE.}, doi = {10.1109/APS.2014.6905336}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84907870530\&doi=10.1109\%2fAPS.2014.6905336\&partnerID=40\&md5=bc3cd87613500ca710b9f58fdca9f27d}, author = {Naghar, A.a and Aghzout, O.a and Vazquez Alejos, A.b and Garcia Sanchez, M.b and Essaaidi, M.c} } @conference { ISI:000361554402054, title = {Development of a Calculator for Edge and Parallel Coupled Microstrip Band Pass Filters}, booktitle = {2014 IEEE ANTENNAS AND PROPAGATION SOCIETY INTERNATIONAL SYMPOSIUM (APSURSI)}, series = {IEEE Antennas and Propagation Society International Symposium}, year = {2014}, note = {IEEE Antennas-and-Propagation-Society International Symposium (APSURSI), Memphis, TN, JUL 06-11, 2014}, pages = {2018-2019}, publisher = {Inst Elect Elect Engineers; Inst Elect Elect Engineers Antennas \& Propagat Soc}, organization = {Inst Elect Elect Engineers; Inst Elect Elect Engineers Antennas \& Propagat Soc}, abstract = {This paper presents an implemented calculator tool for the design of Edge/Parallel Coupled Microstrip Band Pass Filters (PCMBPF) that makes use of the MATLAB software. This calculator allows estimating both the parameters required for the design of the PCMBPF and the electrical response which is obtained by means of the equivalent circuit of this type of filters. Based on the transmission line theory approach (TLTA), the calculator herein proposed is a good solution to simply obtain the design parameters of this type of filters given that all formulas required for the PCMBPF design are programmed using close-form mathematic expressions and the coupling matrix concept. In order to validate our calculator performance, we implemented the proposed filter in a commercial electromagnetic simulator CST MWs that considers a set of electromagnetic effects, and accurately determine the final filter design. Secondly, we compared these simulation outcomes with the measurement results, achieving a reasonable agreement.}, isbn = {978-1-4799-3540-6}, issn = {1522-3965}, author = {Naghar, Azzeddin and Aghzout, Otman and Vazquez Alejos, Ana and Garcia Sanchez, Manuel and Essaaidi, Mohamed} } @conference { ISI:000366999600103, title = {Domain Specific Modeling Approach for Context-Aware Service Oriented Systems}, booktitle = {2014 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2014}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Marrakech, MOROCCO, APR 14-16, 2014}, pages = {581-587}, abstract = {Domain Specific Modeling (DSM) has gained a lot of research attention, especially concerning costs/time optimization and code quality aspects. 
On the other hand, CAC (Context Aware Computing) has recently emerged as a new computing paradigm promising adaptable systems development. A DSM approach for context-aware service oriented systems (SOS) raises many challenges, particularly the challenge of engineering such systems, which consists of defining modeling approaches, processes, techniques and tools to facilitate the construction of these systems. In this paper, we propose a DSM approach for adaptable service oriented systems named the CADSSO (Context Aware, Domain Specific and Service Oriented) modeling approach. Our modeling approach is based on five models: the domain specific services model (conforming to a domain specific services meta-model), which is a representation of the domain specific services; the service variability model (conforming to our service variability meta-model), which formulates the forms of adaptation of the services; the domain specific context model (conforming to our context meta-model), which represents the context of use of the services; the adaptation rules model, which is the joint between the service variability model and the context model; and the domain specific business rules model, used to model the domain specific business.}, isbn = {978-1-4799-3824-7}, author = {Lethrech, Mohammed and Elmagrouni, Issam and Nassar, Mahmoud and Kriouile, Abdelaziz and Kenzi, Adil} } @conference {Lethrech2014575, title = {Domain Specific Modeling approach for context-aware service oriented systems}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2014}, note = {cited By 3}, pages = {575-581}, abstract = {Domain Specific Modeling (DSM) has gained a lot of research attention, especially concerning costs/time optimization and code quality aspects. On the other hand, CAC (Context Aware Computing) has recently emerged as a new computing paradigm promising adaptable systems development. A DSM approach for context-aware service oriented systems (SOS) raises many challenges, particularly the challenge of engineering such systems, which consists of defining modeling approaches, processes, techniques and tools to facilitate the construction of these systems. {\textcopyright} 2014 IEEE.}, doi = {10.1109/ICMCS.2014.6911149}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928731012\&doi=10.1109\%2fICMCS.2014.6911149\&partnerID=40\&md5=a5e0f0216c0e3d951b6072f33fe1e997}, author = {Lethrech, M.a and Elmagrouni, I.a and Nassar, M.a and Kriouile, A.a and Kenzi, A.b} } @article {Nouh2013201, title = {Decoding of block codes by using genetic algorithms and permutations set}, journal = {International Journal of Communication Networks and Information Security}, volume = {5}, number = {3}, year = {2013}, note = {cited By 1}, pages = {201-209}, abstract = {Recently, genetic algorithms have been successfully used for decoding some classes of error correcting codes. For decoding a linear block code C, these genetic algorithms compute a permutation p of the code generator matrix depending on the received word. Our main contribution in this paper is to choose the permutation p from the automorphism group of C. This choice allows reducing the complexity of re-encoding in the decoding steps when C is cyclic, and also generalizing the proposed genetic decoding algorithm to binary nonlinear block codes like the Kerdock codes. In this paper, an efficient stop criterion is proposed, and it considerably reduces the decoding complexity of our algorithm.
The simulation results of the proposed decoder, over the AWGN channel, show that it reaches the error-correcting performance of its competitors. The study of the complexity shows that the proposed decoder is less complex than its competitors that are also based on genetic algorithms.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84890158477\&partnerID=40\&md5=b6dcd56b7afd948385db98325525744e}, author = {Nouh, S. and Chana, I. and Belkasmi, M.} } @article {Harbouche2013137, title = {Deriving multi-agent system behavior}, journal = {International Journal of Software Engineering and its Applications}, volume = {7}, number = {4}, year = {2013}, note = {cited By 1}, pages = {137-156}, abstract = {Multi-agent systems (MAS) have become a very powerful paradigm in the development of complex computer systems. The modeling of these systems can reduce this complexity during the development phases. The Model-Driven Architecture (MDA) approach can be used to resolve this problem. It allows designers to model their systems at different abstraction levels, providing them with automatic model transformations to incrementally refine abstract models into concrete ones. This paper presents an MDA approach to develop multi-agent systems, with the derivation of the behavior of a given system agent from its global requirements. The suggested approach is based on the definition of an appropriate requirements meta-model (Computational Independent Meta-Model CIMM) and the definition of a target design meta-model (Platform Independent Meta-model PIMM). The CIM models are specified using UML activity diagrams extended with collaborations to describe the global behavior of the system. The agent{\textquoteright}s behavior model (PIM) is in the form of distributed UML state machines. Automatic model transformations between these two models have been designed in order to govern the derivation process. A real application of telediagnosis in neuroscience has been developed using this approach.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84884544209\&partnerID=40\&md5=b50eb063699a17d3e7b5f71c9c8137ae}, author = {Harbouche, A.a and Erradi, M.b and Mokhtari, A.c} } @article {Zakriti201387, title = {Design and realization of three-pole bandpass filter with spurious response suppression using defected ground structures}, journal = {Progress In Electromagnetics Research C}, volume = {45}, year = {2013}, note = {cited By 0}, pages = {87-100}, abstract = {In this paper, a three-pole bandpass filter (BPF) using a new defected ground structure (DGS) is discussed. The proposed DGS is incorporated in the ground plane under the feed lines and the coupled lines of a bandpass filter to improve the performance of the filter in both the passband and the stopband. The bandpass filter is designed with a center frequency of 1.8 GHz and a bandwidth of 270 MHz. Suppression of better than 20 dB was achieved for frequencies between 2.2 and 5 GHz. A prototype of the BPF was fabricated and tested.
The measured data of the prototype were in good agreement with the simulation results.}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84887400153\&partnerID=40\&md5=ff9bafb969c5b00869e951183ae50f47}, author = {Zakriti, A.a and Touhami, N.A.b and Bargach, K.b and Lamsalli, M.a and Essaaidi, M.c} } @conference {Sabir2013, title = {Design of an annular ring Ferry-assisted topology for Wireless Sensor Networks}, booktitle = {Proceedings of 2013 6th Joint IFIP Wireless and Mobile Networking Conference, WMNC 2013}, year = {2013}, note = {cited By 0}, abstract = {To ensure connectivity in highly sparse Wireless Sensor Networks (WSNs), we consider a Ferry-assisted Wireless Sensor Network (FWSN). In our FWSN, message ferries moving along concentric annuli collect the packets generated by the static sensors and propagate them through a ferry-to-ferry forwarding scheme to the sink. Moreover, a ferry may embed a sensor and can then be assimilated to a mobile sensor node generating its own data. We provide a mathematical framework based on queuing theory to model the network and analyze its performance in terms of end-to-end throughput. Our findings include the characterization of the stability of the forwarding queues as well as the average end-to-end throughput. At stability, we notice a special feature where the average end-to-end throughput becomes independent of the choice of the weighted fair queuing. {\textcopyright} 2013 IEEE.}, doi = {10.1109/WMNC.2013.6548984}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84881399979\&doi=10.1109\%2fWMNC.2013.6548984\&partnerID=40\&md5=db7991dded90e289f75582cf59d1e214}, author = {Sabir, E.a and Kobbane, A.b and Koulali, M.-A.c and Erradi, M.b} } @conference { ISI:000350287800020, title = {Detection and Recognition of Road Signs}, booktitle = {2013 3RD INTERNATIONAL SYMPOSIUM ISKO-MAGHREB}, year = {2013}, note = {ISKO-Maghreb 3rd International Symposium, Marrakech, MOROCCO, NOV 08-09, 2013}, publisher = {ISKO}, organization = {ISKO}, abstract = {In this paper, we present an application for the detection and recognition of Moroccan road signs together with their positions in video sequences taken by a mobile scanning vehicle in rural areas. Our approach is based on image processing operations: morphology, segmentation, edge detection and object recognition. The application runs on an algorithm which consists of five main steps: input (pre-recorded video stream), processing of video keyframes, edge detection, detection of the geometric shapes of known panels, and recognition of the meaning and position of the detected panel.}, isbn = {978-1-4799-3392-1}, author = {Romadi, Mohammed and Oulad Haj Thami, Rachid and Chiheb, Raddouane and Romadi, Rahal} } @conference {Romadi2013, title = {Detection and recognition of road signs}, booktitle = {2013 3rd International Symposium ISKO-Maghreb}, year = {2013}, note = {cited By 0}, abstract = {In this paper, we present an application for the detection and recognition of Moroccan road signs together with their positions in video sequences taken by a mobile scanning vehicle in rural areas. Our approach is based on image processing operations: morphology, segmentation, edge detection and object recognition. The application runs on an algorithm which consists of five main steps: input (pre-recorded video stream), processing of video keyframes, edge detection, detection of the geometric shapes of known panels, and recognition of the meaning and position of the detected panel.
{\textcopyright} 2013 IEEE.}, doi = {10.1109/ISKO-Maghreb.2013.6728127}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84894208510\&doi=10.1109\%2fISKO-Maghreb.2013.6728127\&partnerID=40\&md5=1c3a22fd74b253e4957628821128d02c}, author = {Romadi, M. and Haj Thami, R.O. and Chiheb, R. and Romadi, R.} } @conference {Ouahed201333, title = {A discovery service for automatic composition of web services oriented-agent}, booktitle = {Proceedings of the Workshop on Enabling Technologies: Infrastructure for Collaborative Enterprises, WETICE}, year = {2013}, note = {cited By 1}, pages = {33-35}, abstract = {In this paper we present an approach for automatic web service composition. The composition process considered includes two layers: the discovery layer, which aims to generate a composition schema, and the execution layer, which uses a multi-agent system to select web services and execute the composite service. The main focus of this work is on the discovery layer, where we propose an approach to optimize the search for the composite service. Firstly, a dependency graph is created by matching the I/O parameters of the request and of all available web services in the registry. Then a backward search is performed on this graph to choose the composition schema. The principle used for choosing services is based on the proportion of input and output parameters. Services with the highest proportion are preferred, allowing us to minimize the number of services participating in the solution. Experimentation and performance evaluation of the suggested approach are given at the end of this paper. {\textcopyright} 2013 IEEE.}, doi = {10.1109/WETICE.2013.16}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883530751\&doi=10.1109\%2fWETICE.2013.16\&partnerID=40\&md5=58ccbaef94e0989981dec058cdee4e71}, author = {Ouahed, A.K.E.a and Erradi, M.b and Azzoune, H.c} } @conference { ISI:000350287800019, title = {Dynamic Text Classifier based on Search Engine Features}, booktitle = {2013 3RD INTERNATIONAL SYMPOSIUM ISKO-MAGHREB}, year = {2013}, note = {ISKO-Maghreb 3rd International Symposium, Marrakech, MOROCCO, NOV 08-09, 2013}, publisher = {ISKO}, organization = {ISKO}, abstract = {Search engines and text categorization are two almost inseparable research areas: where one is studied, the other is referred to sooner or later. Automatic text categorization became more important with the enormous increase of online information, and text classifiers are often there to help search engines classify indexed documents. The main idea presented in this paper consists of using a search engine as a text classifier. A search engine can take advantage of its scoring performance to categorize a new document without requiring the building and use of another categorization model. The K Nearest Neighbors (KNN) principle, based on the search engine score as a similarity measure, was used. This approach is highly dependent on the scoring quality of the search engine used. It is a simple approach but can be competitive with other more complex categorization models. Also, this method is useful as a kind of on-the-fly categorization when indexing a new document.
Through its evolving index, the search engine becomes a dynamic classifier, in the sense that any document recently added to the index participates in the categorization of other new documents.}, isbn = {978-1-4799-3392-1}, author = {Machhour, Hamid and Kassou, Ismail} } @conference {Machhour2013, title = {Dynamic text classifier based on search engine features}, booktitle = {2013 3rd International Symposium ISKO-Maghreb}, year = {2013}, note = {cited By 0}, abstract = {Search engines and text categorization are two almost inseparable research areas: where one is studied, the other is referred to sooner or later. Automatic text categorization became more important with the enormous increase of online information, and text classifiers are often there to help search engines classify indexed documents. The main idea presented in this paper consists of using a search engine as a text classifier. A search engine can take advantage of its scoring performance to categorize a new document without requiring the building and use of another categorization model. The K Nearest Neighbors (KNN) principle, based on the search engine score as a similarity measure, was used. This approach is highly dependent on the scoring quality of the search engine used. It is a simple approach but can be competitive with other more complex categorization models. Also, this method is useful as a kind of on-the-fly categorization when indexing a new document. Through its evolving index, the search engine becomes a dynamic classifier, in the sense that any document recently added to the index participates in the categorization of other new documents. {\textcopyright} 2013 IEEE.}, doi = {10.1109/ISKO-Maghreb.2013.6728125}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84894172340\&doi=10.1109\%2fISKO-Maghreb.2013.6728125\&partnerID=40\&md5=234e95731bc6177558b03187a1c03083}, author = {Machhour, H. and Kassou, I.} } @conference { ISI:000324984400066, title = {D2V-VNS-RPS: Delay and Delay Variation Constrained Algorithm based on Variable Neighborhood Search Algorithm for RP Selection Problem in PIM-SM Protocol}, booktitle = {PROCEEDINGS OF 2012 INTERNATIONAL CONFERENCE ON COMPLEX SYSTEMS (ICCS12)}, year = {2012}, note = {1st International Conference on Complex Systems (ICCS), Agadir, MOROCCO, NOV 05-06, 2012}, pages = {387-392}, abstract = {Due to the progress of network multimedia technology, a majority of real-time multimedia applications need multicast communication to transmit information. These applications require a multicast routing protocol in which packets arrive at multicast receivers with a specified QoS guarantee. The D2V-RPS problem (delay and delay variation RP Selection) consists in choosing an optimal multicast router in the network as the root of the Shared multicast Tree (ST) within a specified delay and associated delay variation. The choice of this specific router, called the RP in the PIM-SM protocol, is the main problem concerning the construction of the multicast tree; this choice affects the structure of the multicast routing tree, and therefore influences the performance of both the multicast session and the routing scheme. The determination of the best position of the RP within a specified delay and associated delay variation is an NP-complete problem: it needs to be solved through a heuristic algorithm. In this paper, we propose a new RP Selection algorithm based on the Variable Neighborhood Search algorithm, which relies on a systematic change of neighborhood.
The D2V-VNS-RPS algorithm selects the RP router by considering tree cost, delay and delay variation. Simulation results show that good performance is achieved.}, isbn = {978-1-4673-4766-2}, author = {Baddi, Youssef and Ech-Cherif El Kettani, Mohamed Dafir}, editor = {Essaaidi, M and Nemiche, M} } @conference {Baddi2012, title = {D2V-VNS-RPS: Delay and delay variation constrained algorithm based on Variable Neighborhood Search algorithm for RP Selection problem in PIM-SM protocol}, booktitle = {Proceedings of 2012 International Conference on Complex Systems, ICCS 2012}, year = {2012}, note = {cited By 0}, doi = {10.1109/ICoCS.2012.6458559}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874481974\&doi=10.1109\%2fICoCS.2012.6458559\&partnerID=40\&md5=8b28eeceffbaa5aff332a7611a7e3207}, author = {Baddi, Y. and El Kettani, M.D.E.-C.} } @conference { ISI:000325229700053, title = {Design of distributed IMS by classification and evaluation of costs for secured architecture}, booktitle = {2012 SECOND INTERNATIONAL CONFERENCE ON INNOVATIVE COMPUTING TECHNOLOGY (INTECH)}, year = {2012}, note = {2nd International Conference on Innovative Computing Technology (INTECH), Casablanca, MOROCCO, SEP 18-20, 2012}, pages = {291-296}, publisher = {IEEE UK \& RI Consumer Elect Chapter; IEEE Broadcast Technol Chapter; IEEE}, organization = {IEEE UK \& RI Consumer Elect Chapter; IEEE Broadcast Technol Chapter; IEEE}, abstract = {The IP Multimedia Subsystem (IMS), the core of the Next Generation Network (NGN) and based on SIP as its signaling mechanism, is an important challenge for supporting data communication services, voice, video, messaging and web-based technologies. In this work we present a novel architecture design and highlight some challenges of the new IMS architecture and security system. This architecture provides robustness, reliability, scalability and a strategy for future extension, and responds to the security challenges. We introduce the architecture with a clustered HSS database and automatic data storage that yields a secure database. This paper gives a classification of security in the IMS network, models the risk in the IMS network, and our comparison is given by the signaling cost of interworking with and without a Security Gateway (SEG). We show that there is a tradeoff between the level of increasing system security and the potential cost incurred. We conclude that this architecture is suitable for operators and service providers for new business models.}, isbn = {978-1-4673-2679-7}, author = {Allouch, Hamid and Belkasmi, Mostafa} } @conference {Allouch2012291, title = {Design of distributed IMS by classification and evaluation of costs for secured architecture}, booktitle = {2nd International Conference on Innovative Computing Technology, INTECH 2012}, year = {2012}, note = {cited By 0}, pages = {291-296}, doi = {10.1109/INTECH.2012.6457814}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874489555\&doi=10.1109\%2fINTECH.2012.6457814\&partnerID=40\&md5=af127257372de35e045a5f6946b76a7f}, author = {Allouch, H.
and Belkasmi, M.} } @conference { ISI:000325229700033, title = {Detection evaluation and testing region incoming people{\textquoteright}s in a simple camera view}, booktitle = {2012 SECOND INTERNATIONAL CONFERENCE ON INNOVATIVE COMPUTING TECHNOLOGY (INTECH)}, year = {2012}, note = {2nd International Conference on Innovative Computing Technology (INTECH), Casablanca, MOROCCO, SEP 18-20, 2012}, pages = {179-183}, publisher = {IEEE UK \& RI Consumer Elect Chapter; IEEE Broadcast Technol Chapter; IEEE}, organization = {IEEE UK \& RI Consumer Elect Chapter; IEEE Broadcast Technol Chapter; IEEE}, abstract = {Moving object detection is considered a crucial phase of automatic video surveillance systems; in particular, people detection is the first important step in any video analysis process, which can be divided into several stages such as motion estimation, people tracking, etc. Several methods have been developed for this problem of separating foreground and background pixels in video surveillance. This paper focuses on a computable evaluation of some people detection algorithms over four different video sequences. Our study is based on quantitative and qualitative results obtained by calculating the loss of foreground pixels. In particular, three methods have been evaluated using two metrics: False Negative Error (FNE) and False Positive Error (FPE). As a result, we choose the algorithm which minimizes the error (\%). In practice, the technique that dominates video surveillance applications is the statistical representation of foreground pixels known as the Gaussian Mixture Model (GMM). In the second part of this paper, we monitor people entering a supervised region and trigger an alarm system in order to detect the presence of a person.}, isbn = {978-1-4673-2679-7}, author = {Ez Zahout, Abderrahmane and Moulay Youssef, Hadi and Haj Thami, Rachid Oulad} } @conference {Ezzahout2012179, title = {Detection evaluation and testing region incoming people{\textquoteright}s in a simple camera view}, booktitle = {2nd International Conference on Innovative Computing Technology, INTECH 2012}, year = {2012}, note = {cited By 0}, pages = {179-183}, doi = {10.1109/INTECH.2012.6457804}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874460739\&doi=10.1109\%2fINTECH.2012.6457804\&partnerID=40\&md5=ac4defc953442c5eec7ef981a25f270f}, author = {Ezzahout, A. and Youssef, H.M. and Thami, R.O.H.} } @article {Koulali2012, title = {Dynamic power control for energy harvesting wireless multimedia sensor networks}, journal = {Eurasip Journal on Wireless Communications and Networking}, volume = {2012}, year = {2012}, note = {cited By 3}, doi = {10.1186/1687-1499-2012-158}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84872869198\&doi=10.1186\%2f1687-1499-2012-158\&partnerID=40\&md5=2b0c34f9aac01ae9f865a22159e62779}, author = {Koulali, M.-A. and Kobbane, A. and Koutbi, M.E. and Tembine, H. and Ben-Othman, J.} } @article { ISI:000305225200001, title = {Dynamic power control for energy harvesting wireless multimedia sensor networks}, journal = {EURASIP JOURNAL ON WIRELESS COMMUNICATIONS AND NETWORKING}, year = {2012}, abstract = {Optimization of energy usage in wireless sensor networks (WSN) has been an active research field for the last decades, and various approaches have been explored. In fact, a well-designed energy consumption model is the foundation for developing and evaluating a power management scheme in networks of energy-constrained devices such as WSNs.
We are interested in developing optimal centralized power control policies for energy harvesting wireless multimedia sensor networks (WMSN) equipped with photovoltaic cells. We propose a new complete information Markov decision process model to characterize the sensor{\textquoteright}s battery discharge/recharge process and inspect the structural properties of optimal transmit policies.}, issn = {1687-1499}, doi = {10.1186/1687-1499-2012-158}, author = {Koulali, Mohammed-Amine and Kobbane, Abdellatif and El Koutbi, Mohammed and Tembine, Hamidou and Ben-Othman, Jalel} } @conference {Kobbane2012518, title = {Dynamic power control with energy constraint for Multimedia Wireless Sensor Networks}, booktitle = {IEEE International Conference on Communications}, year = {2012}, note = {cited By 2}, pages = {518-522}, doi = {10.1109/ICC.2012.6363971}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84871981947\&doi=10.1109\%2fICC.2012.6363971\&partnerID=40\&md5=2e71bfe8b840a119435a1fb89dd122f0}, author = {Kobbane, A. and Koulali, M.-A. and Tembine, H. and Koutbi, M.E. and Ben-Othman, J.} } @conference { ISI:000312855700097, title = {Dynamic Power Control with Energy Constraint for Multimedia Wireless Sensor Networks}, booktitle = {2012 IEEE INTERNATIONAL CONFERENCE ON COMMUNICATIONS (ICC)}, series = {IEEE International Conference on Communications}, year = {2012}, note = {IEEE International Conference on Communications (ICC), Ottawa, CANADA, JUN 10-15, 2012}, publisher = {IEEE}, organization = {IEEE}, abstract = {In recent years, many approaches and techniques have been explored for the optimization of energy usage in Wireless Sensor Networks (WSN). It is well recognized that a proper energy consumption model is the foundation for developing and evaluating a power management scheme in WSN. In this paper, we propose a new complete information Markov Decision Process (MDP) model to characterize the sensors{\textquoteright} energy levels. We also propose and compare several centralized power control policies to select the most efficient policy that optimizes throughput and energy consumption.}, isbn = {978-1-4577-2053-6}, issn = {1550-3607}, author = {Kobbane, A. and Koulali, M. -A. and Tembine, H. and El Koutbi, M. and Ben-Othman, J.} } @conference { ISI:000310353000129, title = {A Dynamic Proxy for Lightweight Web Service Composition}, booktitle = {2012 INTERNATIONAL CONFERENCE ON MULTIMEDIA COMPUTING AND SYSTEMS (ICMCS)}, year = {2012}, note = {International Conference on Multimedia Computing and Systems (ICMCS), Tangiers, MOROCCO, MAY 10-12, 2012}, pages = {699-704}, publisher = {Morocco Sect MTT/AP Joint Chapter}, organization = {Morocco Sect MTT/AP Joint Chapter}, abstract = {Web Services (WS) enable access to remote applications over the Internet. Their composition gives rise to more complex services by integrating applications across the borders of companies, institutions, and countries. But such compositions fail if they cannot dynamically adapt to change in such an ever-changing environment. Unfortunately, web compositions today are still implemented in a static way. Implementing invocations of web services is too verbose and heavy-weight: it requires manual extraction of each service{\textquoteright}s information from interface descriptions and, because of the missing transparency in composition languages, developers repeatedly have to write low-level invocation code. This paper proposes a light-weight approach for automatic and transparent web service compositions, supporting standard SOAP-based web services.
We provide a proof-of-concept as a scripting language. The benefit is that our web services composition approach makes compositions simpler and more transparent, because invoking a web service is no more complicated than invoking a local method.}, isbn = {978-1-4673-1520-3}, author = {Najam, Sara and Dinkelaker, Tom and Erradi, Mohamed and Ouzzif, Mohamed}, editor = {Essaaidi, M and Zaz, Y} } @conference {Douiri20111456, title = {Direct electromagnetic torque control of induction motors powered by high power PWM inverters for two levels or three levels}, booktitle = {Progress in Electromagnetics Research Symposium}, year = {2011}, note = {cited By 0}, pages = {1456-1460}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84055183048\&partnerID=40\&md5=8b40b36f592e36d14b3b967fe852960f}, author = {Douiri, M.R. and Cherkaoui, M. and Nasser, T. and Essadki, A.} } @conference {Douiri2011, title = {Direct torque fuzzy controlled induction machine drive using an optimized extended Kalman filter}, booktitle = {2011 International Conference on Communications, Computing and Control Applications, CCCA 2011}, year = {2011}, note = {cited By 0}, doi = {10.1109/CCCA.2011.6031399}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80054799287\&doi=10.1109\%2fCCCA.2011.6031399\&partnerID=40\&md5=0f26ab2338a09b165f1925dc6916cb9c}, author = {Douiri, M.R. and Cherkaoui, M. and Nasser, T. and Essadki, A.} } @conference {ElGhayam2011, title = {Distributed context management in collaborative environment}, booktitle = {2011 11th Annual International Conference on New Technologies of Distributed Systems, NOTERE 2011 - Proceedings}, year = {2011}, note = {cited By 0}, doi = {10.1109/NOTERE.2011.5957991}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80052003691\&doi=10.1109\%2fNOTERE.2011.5957991\&partnerID=40\&md5=dccafacd8741c885d613457f2aae2039}, author = {El Ghayam, Y. and Erradi, M.} } @conference {Zoubairi2011, title = {Dynamic QoS management in mobile services framework}, booktitle = {International Conference on Multimedia Computing and Systems -Proceedings}, year = {2011}, note = {cited By 0}, doi = {10.1109/ICMCS.2011.5945691}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79961242339\&doi=10.1109\%2fICMCS.2011.5945691\&partnerID=40\&md5=dc02a4eb28cdfcfae12bca1fa64efde3}, author = {Zoubairi, R. and Jarir, Z. and Erradi, M.} } @article { ISI:000293491800013, title = {Dynamic robust power allocation games under channel uncertainty and time delays}, journal = {COMPUTER COMMUNICATIONS}, volume = {34}, number = {12}, year = {2011}, month = {AUG 2}, pages = {1529-1537}, abstract = {In this paper, we study dynamic robust power allocation strategies under the imperfectness of the channel state information at the transmitters. Considering unknown payoff functions at the transmitters, we propose an heterogeneous Delayed COmbined fully Distributed Payoff and Strategy Reinforcement Learning (Delayed-CODIPAS-RL) in which each transmitter learns its payoff function as well as its associated optimal strategies in the long-term. We show that equilibrium power allocations can be obtained using the multiplicative weighted imitative CODIPAS-RLs and Bush-Mosteller based CODIPAS-RL. We also show almost sure convergence to the set of global optima for specific scenarios. (C) 2011 Elsevier B.V. 
All rights reserved.}, issn = {0140-3664}, doi = {10.1016/j.comcom.2011.02.009}, author = {Tembine, Hamidou and Kobbane, Abdellatif and El koutbi, Mohamed} } @article {Tembine20111529, title = {Dynamic robust power allocation games under channel uncertainty and time delays}, journal = {Computer Communications}, volume = {34}, number = {12}, year = {2011}, note = {cited By 1}, pages = {1529-1537}, doi = {10.1016/j.comcom.2011.02.009}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79959380843\&doi=10.1016\%2fj.comcom.2011.02.009\&partnerID=40\&md5=5103dd198356953b3e6cf4a1d9c94519}, author = {Tembine, H. and Kobbane, A. and Koutbi, M.E.} } @article {Enneya2011479, title = {A dynamic timestamp discrepancy against replay attacks in MANET}, journal = {Communications in Computer and Information Science}, volume = {254 CCIS}, number = {PART 4}, year = {2011}, note = {cited By 0}, pages = {479-489}, doi = {10.1007/978-3-642-25483-3_40}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-82955164015\&doi=10.1007\%2f978-3-642-25483-3_40\&partnerID=40\&md5=ad023b905f7e8c18ada1a4d0d7e2c224}, author = {Enneya, N. and Baayer, A. and Elkoutbi, M.} } @conference { ISI:000310937500040, title = {A Dynamic Timestamp Discrepancy against Replay Attacks in MANET}, booktitle = {INFORMATICS ENGINEERING AND INFORMATION SCIENCE, PT IV}, series = {Communications in Computer and Information Science}, volume = {254}, year = {2011}, note = {International Conference on Informatics Engineering and Information Science (ICIEIS 2011), Univ Teknol Malaysia, Kuala Lumpur, MALAYSIA, NOV 14-16, 2011}, pages = {479+}, publisher = {Springer}, organization = {Springer}, abstract = {Mobile Ad hoc NETworks (MANETs), like traditional networks, are vulnerable to a wide range of Denial-of-Service (DoS) attacks. A replay attack is one of them, and it severely degrades MANET performance. A replay attacker performs this attack by intercepting and retransmitting valid signed messages. The validity of signed messages is verified by a timestamp discrepancy fixed by the sender and receiver nodes. In this paper, we propose an enhancement of the timestamp concept, to avoid replay attacks, characterized by two properties. The first is dynamic because it depends on the communication duration between the sender and receiver nodes. The second is rigid because it estimates approximately the maximum date at which the signed message should arrive at the receiver node.}, isbn = {978-3-642-25482-6; 978-3-642-25483-3}, issn = {1865-0929}, author = {Enneya, Nourddine and Baayer, Aziz and Elkoutbi, Mohammed}, editor = {AbdManaf, A and Sahibuddin, S and Ahmad, R and Daud, SM and ElQawasmeh, E} } @conference {ElGhayam2010151, title = {Decision tree based context management in a collaborative environment}, booktitle = {NOTERE{\textquoteright}10 - 10th Annual International Conference on New Technologies of Distributed Systems}, year = {2010}, note = {cited By 0}, pages = {151-156}, doi = {10.1109/NOTERE.2010.5536734}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77955951688\&doi=10.1109\%2fNOTERE.2010.5536734\&partnerID=40\&md5=b0648dfb2d81dbc54205839215559aaf}, author = {El Ghayam, Y.
and Erradi, M.} } @conference {Elbhiri2010, title = {Developed Distributed Energy-Efficient Clustering (DDEEC) for heterogeneous wireless sensor networks}, booktitle = {2010 5th International Symposium on I/V Communications and Mobile Networks, ISIVC 2010}, year = {2010}, note = {cited By 2}, doi = {10.1109/ISVC.2010.5656252}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78651495359\&doi=10.1109\%2fISVC.2010.5656252\&partnerID=40\&md5=04c6f29644071a785006aab6b93c9c27}, author = {Elbhiri, B. and Rachid, S. and El Fkihi, S. and Aboutajdine, D.} } @conference {Belkhayat2009, title = {Dynamic model of the E-Strategy inspired from the information systems{\textquoteright} urbanization [Mod{\`e}le dynamique de la E-Strat{\'e}gie inspir{\'e} de l{\textquoteright}urbanisation des syst{\`e}mes d{\textquoteright}information]}, booktitle = {14th International Conference of the Association Information and Management 2009, AIM 2009}, year = {2009}, note = {cited By 0}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84870450763\&partnerID=40\&md5=56a318e3533b30a19d6f426c63bda333}, author = {Belkhayat, N. and Doukkali, P.A. and Regragui, P.B.} } @conference {Berbia2008667, title = {On the decoding of convolutional codes using genetic algorithms}, booktitle = {Proceedings of the International Conference on Computer and Communication Engineering 2008, ICCCE08: Global Links for Human Development}, year = {2008}, note = {cited By 1}, pages = {667-671}, doi = {10.1109/ICCCE.2008.4580688}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-52249083280\&doi=10.1109\%2fICCCE.2008.4580688\&partnerID=40\&md5=8a49644ba289981af8e55b689e45313e}, author = {Berbia, H. and Belkasmi, M. and Elbouanani, F. and Ayoub, F.} } @article { ISI:000256710200013, title = {Description of a teleconferencing floor control protocol and its implementation}, journal = {ENGINEERING APPLICATIONS OF ARTIFICIAL INTELLIGENCE}, volume = {21}, number = {3}, year = {2008}, note = {6th Conference on Modelling and Simulation, Rabat, MOROCCO, 2006}, month = {APR}, pages = {430-441}, abstract = {In this paper, we present a formal specification of a teleconferencing floor control protocol and its implementation. The services provided by this protocol are described within the SCCP IETF document (Simple Conference Control Protocol). Finite state machines are used to model the service behaviours that are part of this protocol. Temporal properties are defined as constraints of the teleconferencing system using the SCCP protocol. The dynamic properties are described in LTL (Linear Temporal Logic) and verified using the Spin/Promela model checker. A prototype of a multimedia teleconferencing system based on the specified protocol is implemented. This implementation uses UML notation and is developed with the JMF (Java Media Framework) API. (C) 2007 Elsevier Ltd. All rights reserved.}, issn = {0952-1976}, doi = {10.1016/j.engappai.2007.11.003}, author = {Ouzzif, Mohammed and Erradi, Mohammed and Mountassir, Hassan} } @article { ISI:000242519900010, title = {DISCOBOLE: A service architecture for interconnecting workflow processes}, journal = {COMPUTERS IN INDUSTRY}, volume = {57}, number = {8-9}, year = {2006}, month = {DEC}, pages = {768-777}, abstract = {Process interconnection mechanisms are necessary to coordinate geographically distributed business processes in order to strengthen awareness inside virtual enterprises, to facilitate multinational e-transactions, etc.
Actually, existing business process modelling and enactment systems (workflow systems, project management tools, shared agendas, to do lists, etc.) have been mainly developed to suit enterprise internal needs. Thus, most of these systems are not adapted to inter-enterprise cooperation. As we are interested in workflow processes, we aim, through this paper, to present a model supporting dynamic heterogeneous workflow process interconnection. We consider the interconnection of enterprise workflow processes as the management of a {\textquoteleft}{\textquoteleft}workflow of workflows{{\textquoteright}{\textquoteright}} in which several heterogeneous workflow systems coexist. This paper introduces our process interconnection model, its implementation, and its validation through experimentation. (C) 2006 Elsevier B.V. All rights reserved.}, issn = {0166-3615}, doi = {10.1016/j.compind.2006.04.018}, author = {Baina, Karim and Benali, Khalid and Godart, Claude} }