@inproceedings{gomes2021dive,
  abstract  = {In this work we present DiVe (Distance-based Vector Embedding), a new word embedding technique based on the Logistic Markov Embedding (LME). First, we generalize LME to consider different distance metrics and address existing scalability issues using negative sampling, thus making DiVe scalable for large datasets. In order to evaluate the quality of word embeddings produced by DiVe, we used them to train standard machine learning classifiers, with the goal of performing different Natural Language Processing (NLP) tasks. Our experiments demonstrated that DiVe is able to outperform existing (more complex) machine learning approaches, while preserving simplicity and scalability.},
  address   = {Cham},
  author    = {Guilherme Gomes, Bruno and Murai, Fabricio and Goussevskaia, Olga and da Silva, Ana Paula},
  booktitle = {Natural Language Processing and Information Systems},
  doi       = {10.1007/978-3-030-80599-9_12},
  editor    = {M{\'e}tais, Elisabeth and Meziane, Farid and Horacek, Helmut and Kapetanios, Epaminondas},
  isbn      = {978-3-030-80599-9},
  pages     = {135--146},
  publisher = {Springer International Publishing},
  title     = {Sequence-Based Word Embeddings for Effective Text Classification},
  year      = {2021}
}