% Bibliography database (BibTeX/biblatex compatible).
% Cleanup notes:
%   - One field per line, consistent field order, month macros bare (month = mar).
%   - Titles brace only acronyms/proper nouns so bibliography styles control casing.
%   - The key liang_improved_2021 was used for two different articles; the
%     ant colony paper is now liang_improved_2021a (update \cite commands that
%     intended the PLOS ONE paper).
%   - ma_model_2022 appeared twice for the same paper (same DOI); the two
%     entries are merged below with the union of their fields.

@article{li_lsda-apf_2023,
  title      = {{LSDA-APF}: A Local Obstacle Avoidance Algorithm for Unmanned Surface Vehicles Based on {5G} Communication Environment},
  shorttitle = {{LSDA-APF}},
  author     = {Li, Xiaoli and Jiao, Tongtong and Ma, Jinfeng and Duan, Dongxing and Liang, Shengbin},
  journal    = {Computer Modeling in Engineering \& Sciences},
  volume     = {138},
  number     = {1},
  pages      = {595--617},
  year       = {2023},
  issn       = {1526-1492, 1526-1506},
  doi        = {10.32604/cmes.2023.029367},
  url        = {https://www.techscience.com/CMES/v138n1/54251},
  urldate    = {2024-01-13},
  language   = {en},
  note       = {Publisher: Tech Science Press},
  abstract   = {In view of the complex marine environment of navigation, especially in the case of multiple static and dynamic obstacles, the traditional obstacle avoidance algorithms applied to unmanned surface vehicles (USV) are prone to fall into the trap of local optimization. Therefore, this paper proposes an improved artificial potential field (APF) algorithm, which uses 5G communication technology to communicate between the USV and the control center. The algorithm introduces the USV discrimination mechanism to avoid the USV falling into local optimization when the USV encounter different obstacles in different scenarios. Considering the various scenarios between the USV and other dynamic obstacles such as vessels in the process of performing tasks, the algorithm introduces the concept of dynamic artificial potential field. For the multiple obstacles encountered in the process of USV sailing, based on the International Regulations for Preventing Collisions at Sea (COLREGS), the USV determines whether the next step will fall into local optimization through the discrimination mechanism. The local potential field of the USV will dynamically adjust, and the reverse virtual gravitational potential field will be added to prevent it from falling into the local optimization and avoid collisions. The objective function and cost function are designed at the same time, so that the USV can smoothly switch between the global path and the local obstacle avoidance. The simulation results show that the improved APF algorithm proposed in this paper can successfully avoid various obstacles in the complex marine environment, and take navigation time and economic cost into account.},
}

@incollection{yao_hierarchical_2023,
  title      = {Hierarchical Medical Classification Based on {DLCF}},
  author     = {Yao, Mingyuan and Sun, Haoran and Liang, Shengbin and Shen, Yanqing and Yukie, Niki},
  editor     = {Lee, Roger},
  booktitle  = {Computer and Information Science},
  series     = {Studies in Computational Intelligence},
  pages      = {101--115},
  year       = {2023},
  publisher  = {Springer International Publishing},
  address    = {Cham},
  isbn       = {978-3-031-12127-2},
  doi        = {10.1007/978-3-031-12127-2_7},
  url        = {https://doi.org/10.1007/978-3-031-12127-2_7},
  urldate    = {2023-04-11},
  language   = {en},
  keywords   = {Dual channel, Hierarchical classification, LSTM-CNN, Medical classification, RF},
  abstract   = {Medical classification is affected by many factors, and the traditional medical classification is usually restricted by factors such as too long text, numerous categories and so on. In order to solve these problems, this paper uses word vector and word vector to mine the text deeply, considering the problem of scattered key features of medical text, introducing long-term and short-term memory network to effectively retain the features of historical information in long text sequence, and using the structure of CNN to extract local features of text, through attention mechanism to obtain key features, considering the problems of many diseases, by using hierarchical classification. To stratify the disease. Combined with the above ideas, a deep DLCF model suitable for long text and multi-classification is designed. This model has obvious advantages in CMDD and other datasets. Compared with the baseline models, this model is superior to the baseline model in accuracy, recall and other indicators.},
}

@article{liang_improved_2023,
  title      = {An Improved Dual-Channel Deep {Q-Network} Model for Tourism Recommendation},
  author     = {Liang, Shengbin and Jin, Jiangyong and Ren, Jia and Du, Wencai and Qu, Shenming},
  journal    = {Big Data},
  pages      = {big.2021.0353},
  year       = {2023},
  month      = mar,
  issn       = {2167-6461, 2167-647X},
  doi        = {10.1089/big.2021.0353},
  url        = {https://www.liebertpub.com/doi/10.1089/big.2021.0353},
  urldate    = {2023-04-26},
  language   = {en},
  keywords   = {context-aware, deep reinforcement learning, dual-channel, tourism recommendation},
}

@article{liang_improved_2021,
  title      = {An Improved Double Channel Long Short-Term Memory Model for Medical Text Classification},
  author     = {Liang, Shengbin and Chen, Xinan and Ma, Jixin and Du, Wencai and Ma, Huawei},
  editor     = {Li, Xingwang},
  journal    = {Journal of Healthcare Engineering},
  volume     = {2021},
  pages      = {1--8},
  year       = {2021},
  month      = feb,
  issn       = {2040-2309, 2040-2295},
  doi        = {10.1155/2021/6664893},
  url        = {https://www.hindawi.com/journals/jhe/2021/6664893/},
  urldate    = {2022-04-28},
  language   = {en},
  note       = {1 citations (Crossref) [2022-09-21]},
  abstract   = {There are a large number of symptom consultation texts in medical and healthcare Internet communities, and Chinese health segmentation is more complex, which leads to the low accuracy of the existing algorithms for medical text classification. The deep learning model has advantages in extracting abstract features of text effectively. However, for a large number of samples of complex text data, especially for words with ambiguous meanings in the field of Chinese medical diagnosis, the word-level neural network model is insufficient. Therefore, in order to solve the triage and precise treatment of patients, we present an improved Double Channel (DC) mechanism as a significant enhancement to Long Short-Term Memory (LSTM). In this DC mechanism, two channels are used to receive word-level and char-level embedding, respectively, at the same time. Hybrid attention is proposed to combine the current time output with the current time unit state and then using attention to calculate the weight. By calculating the probability distribution of each timestep input data weight, the weight score is obtained, and then weighted summation is performed. At last, the data input by each timestep is subjected to trade-off learning to improve the generalization ability of the model learning. Moreover, we conduct an extensive performance evaluation on two different datasets: cMedQA and Sentiment140. The experimental results show that the DC-LSTM model proposed in this paper has significantly superior accuracy and ROC compared with the basic CNN-LSTM model.},
}

% Key renamed from liang_improved_2021 (duplicate of the entry above).
@article{liang_improved_2021a,
  title      = {An improved ant colony optimization algorithm based on context for tourism route planning},
  author     = {Liang, Shengbin and Jiao, Tongtong and Du, Wencai and Qu, Shenming},
  editor     = {Oliva, Diego},
  journal    = {PLOS ONE},
  volume     = {16},
  number     = {9},
  pages      = {e0257317},
  year       = {2021},
  month      = sep,
  issn       = {1932-6203},
  doi        = {10.1371/journal.pone.0257317},
  url        = {https://dx.plos.org/10.1371/journal.pone.0257317},
  urldate    = {2022-04-28},
  language   = {en},
  note       = {3 citations (Crossref) [2022-09-21]},
  abstract   = {To solve the problem of one-sided pursuit of the shortest distance but ignoring the tourist experience in the process of tourism route planning, an improved ant colony optimization algorithm is proposed for tourism route planning. Contextual information of scenic spots significantly effect people’s choice of tourism destination, so the pheromone update strategy is combined with the contextual information such as weather and comfort degree of the scenic spot in the process of searching the global optimal route, so that the pheromone update tends to the path suitable for tourists. At the same time, in order to avoid falling into local optimization, the sub-path support degree is introduced. The experimental results show that the optimized tourism route has greatly improved the tourist experience, the route distance is shortened by 20.5\% and the convergence speed is increased by 21.2\% compared with the basic algorithm, which proves that the improved algorithm is notably effective.},
}

@article{liang_multi-channel_2023,
  title      = {A Multi-Channel Text Sentiment Analysis Model Integrating Pre-training Mechanism},
  author     = {Liang, Shengbin and Jin, Jiangyong and Du, Wencai and Qu, Shenming},
  journal    = {Information Technology and Control},
  volume     = {52},
  number     = {2},
  pages      = {263--275},
  year       = {2023},
  month      = jul,
  issn       = {2335-884X},
  doi        = {10.5755/j01.itc.52.2.31803},
  url        = {https://itc.ktu.lt/index.php/ITC/article/view/31803},
  urldate    = {2024-01-13},
  language   = {en},
  copyright  = {Copyright (c) 2023 Information Technology and Control},
  note       = {Number: 2},
  keywords   = {Pre-training mechanism},
  abstract   = {The number of tourist attractions reviews, travel notes and other texts has grown exponentially in the Internet age. Effectively mining users’ potential opinions and emotions on tourist attractions, and helping to provide users with better recommendation services, which is of great practical significance. This paper proposes a multi-channel neural network model called Pre-BiLSTM combined with a pre-training mechanism. The model uses a combination of coarse and fine- granularity strategies to extract the features of text information such as reviews and travel notes to improve the performance of text sentiment analysis. First, we construct three channels and use the improved BERT and skip-gram methods with negative sampling to vectorize the word-level and vocabulary-level text, respectively, so as to obtain more abundant textual information. Second, we use the pre-training mechanism of BERT to generate deep bidirectional language representation relationships. Third, the vectors of the three channels are input into the BiLSTM network in parallel to extract global and local features. Finally, the model fuses the text features of the three channels and classifies them using SoftMax classifier. Furthermore, numerical experiments are conducted to demonstrate that Pre-BiLSTM outperforms the baselines by 6.27\%, 12.83\% and 18.12\% in average in terms of accuracy, precision and F1-score.},
}

@article{li_model_2023,
  title      = {A model of integrating convolution and {BiGRU} dual-channel mechanism for {Chinese} medical text classifications},
  author     = {Li, Xiaoli and Zhang, Yuying and Jin, Jiangyong and Sun, Fuqi and Li, Na and Liang, Shengbin},
  journal    = {PLOS ONE},
  volume     = {18},
  number     = {3},
  pages      = {e0282824},
  year       = {2023},
  month      = mar,
  issn       = {1932-6203},
  doi        = {10.1371/journal.pone.0282824},
  url        = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0282824},
  urldate    = {2023-04-11},
  language   = {en},
  note       = {Publisher: Public Library of Science},
  keywords   = {Convolution, Deep learning, Machine learning, Memory recall, Neural networks, Recurrent neural networks, Semantics, Syntax},
  abstract   = {Recently, a lot of Chinese patients consult treatment plans through social networking platforms, but the Chinese medical text contains rich information, including a large number of medical nomenclatures and symptom descriptions. How to build an intelligence model to automatically classify the text information consulted by patients and recommend the correct department for patients is very important. In order to address the problem of insufficient feature extraction from Chinese medical text and low accuracy, this paper proposes a dual channel Chinese medical text classification model. The model extracts feature of Chinese medical text at different granularity, comprehensively and accurately obtains effective feature information, and finally recommends departments for patients according to text classification. One channel of the model focuses on medical nomenclatures, symptoms and other words related to hospital departments, gives different weights, calculates corresponding feature vectors with convolution kernels of different sizes, and then obtains local text representation. The other channel uses the BiGRU network and attention mechanism to obtain text representation, highlighting the important information of the whole sentence, that is, global text representation. Finally, the model uses full connection layer to combine the representation vectors of the two channels, and uses Softmax classifier for classification. The experimental results show that the accuracy, recall and F1-score of the model are improved by 10.65\%, 8.94\% and 11.62\% respectively compared with the baseline models in average, which proves that our model has better performance and robustness.},
}

% Merged entry: the file previously contained two entries with this key and
% the same DOI; fields from both are combined here.
@inproceedings{ma_model_2022,
  title      = {A Model of Integrating {Bert} and {BiGRU}+ Attention Dual-channel Mechanism for Investor Sentiment Analysis of Stock Price Forecast},
  author     = {Ma, Huawei and Ma, Jixin and Liang, Shengbin and Du, Wencai},
  booktitle  = {2022 {IEEE}/{ACIS} 23rd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing ({SNPD})},
  pages      = {126--131},
  year       = {2022},
  month      = dec,
  publisher  = {IEEE},
  address    = {Taichung, Taiwan},
  isbn       = {9798350310412},
  doi        = {10.1109/SNPD54884.2022.10051779},
  url        = {https://ieeexplore.ieee.org/document/10051779},
  urldate    = {2024-01-14},
  note       = {ISSN: 2693-8421},
  abstract   = {Investor sentiment and emotions have a strong impact on financial markets. In recent years there has been increasing interest in analyzing the sentiment of investors for stock price prediction using machine learning. Existing prediction models mostly depend on the analysis of trading data and company profit. few prediction theories have been built based on individual investors' sentiments. The fundamental reason is the difficulty to measure individual investors' sentiment.},
}

@article{liang_medical_2023,
  title      = {A medical text classification approach with {ZEN} and capsule network},
  author     = {Liang, Shengbin and Sun, Fuqi and Sun, Haoran and Chen, Tingting and Du, Wencai},
  journal    = {The Journal of Supercomputing},
  year       = {2023},
  month      = sep,
  issn       = {1573-0484},
  doi        = {10.1007/s11227-023-05612-6},
  url        = {https://doi.org/10.1007/s11227-023-05612-6},
  urldate    = {2024-01-13},
  language   = {en},
  keywords   = {Capsule network, Medical text classification, Text mining, ZEN model},
  abstract   = {Text classification is an important topic in natural language processing, with the development of social network, many question-and-answer pairs regarding health-care and medicine flood social platforms. It is of great social value to mine and classify medical text and provide targeted medical services for patients. The existing algorithms of text classification can deal with simple semantic text, especially in the field of Chinese medical text, the text structure is complex and includes a large number of medical nomenclature and professional terms, which are difficult for patients to understand. We propose a Chinese medical text classification model using a BERT-based Chinese text encoder by N-gram representations (ZEN) and capsule network, which represent feature uses the ZEN model and extract the features by capsule network, we also design a N-gram medical dictionary to enhance medical text representation and feature extraction. The experimental results show that the precision, recall and F1-score of our model are improved by 10.25\%, 11.13\% and 12.29\%, respectively, compared with the baseline models in average, which proves that our model has better performance.},
}

@incollection{hao_dcrc_2023,
  title      = {A {DCRC} Model for Text Classification},
  author     = {Hao, Zhaoquan and Jin, Jiangyong and Liang, Shengbin and Cheng, Suying and Shen, Yanqing},
  editor     = {Lee, Roger},
  booktitle  = {Computer and Information Science},
  series     = {Studies in Computational Intelligence},
  pages      = {85--99},
  year       = {2023},
  publisher  = {Springer International Publishing},
  address    = {Cham},
  isbn       = {978-3-031-12127-2},
  doi        = {10.1007/978-3-031-12127-2_6},
  url        = {https://doi.org/10.1007/978-3-031-12127-2_6},
  urldate    = {2023-04-11},
  language   = {en},
  keywords   = {BiGRU, BiLSTM, CNN, Text classification},
  abstract   = {Traditional text classification models have some drawbacks, such as the inability of the model to focus on important parts of the text contextual information in text processing. To solve this problem, we fuse the long and short-term memory network BiGRU with a convolutional neural network to receive text sequence input to reduce the dimensionality of the input sequence and to reduce the loss of text features based on the length and context dependency of the input text sequence. Considering the extraction of important features of the text, we choose the long and short-term memory network BiLSTM to capture the main features of the text and thus reduce the loss of features. Finally, we propose a BiGRU-CNN-BiLSTM model (DCRC model) based on CNN, GRU and LSTM, which is trained and validated on the THUCNews and Toutiao News datasets. The model outperformed the traditional model in terms of accuracy, recall and F1 score after experimental comparison.},
}