@comment{{This file has been generated by bib2bib 1.99}}
@comment{{Command line: bib2bib -c 'author : "Rico"' ../Tesis/Tesis.bib}}
@comment{{x-kbibtex-personnameformatting=<%l><, %f>}}
@book{Rico-Juan:1999:book,
  abstract = {This book describes two programming schemes: dynamic programming, and branch and bound. Each scheme is described from a general point of view, extracting representative characteristics and generating a template that is later applied to concrete cases. The book contains a wide variety of examples and solved exercises.},
  author = {Rico-Juan, Juan Ram{\'o}n},
  isbn = {84-7908-481-2},
  publisher = {Publicaciones de la Universidad de Alicante},
  title = {{Esquemas Algor{\'i}tmicos}},
  year = {1999}
}
@phdthesis{Rico-Juan:2001,
  abstract = {One of the original contributions of this thesis is the definition of a stochastic inference model for $k$-testable tree languages and its application to compression and classification. Other probabilistic models are also contributed for the tasks of 3D surface compression and off-line handwritten word recognition.},
  author = {Rico-Juan, Juan Ram{\'o}n},
  month = may,
  pdf = {http://www.dlsi.ua.es/~juanra/papers/Tesis.pdf},
  school = {Universidad de Alicante, Departamento de Lenguajes y Sistemas Inform{\'a}ticos},
  title = {{Inferencia estoc{\'a}stica y aplicaciones de los lenguajes de {\'a}rboles}},
  year = {2001}
}
@article{Abreu+Rico-Juan:prl:2011,
  abstract = {This paper describes a new method for quantifying the regularity of contours and comparing them (when encoded by Freeman chain codes) in terms of a similarity criterion which relies on information gathered from the Levenshtein edit distance computation. The criterion used allows subsequences to be found from the minimal cost edit sequence that specifies an alignment of contour segments which are similar. Two external parameters adjust the similarity criterion. The information about each similar part is encoded by strings that represent an average contour region. An explanation of how to construct a prototype based on the identified regularities is also given. The reliability of the prototypes is evaluated by replacing contour groups (samples) by new prototypes used as the training set in a classification task. This way, the size of the data set can be reduced without significantly affecting its representational power for classification purposes. Experimental results show that this scheme achieves a reduction in the size of the training data set of about 80\% while the classification error only increases by 0.45\% in one of the three data sets studied.},
  author = {Abreu, J. and Rico-Juan, J. R.},
  impact_factor = {1.034 - Q3 - JCR},
  journal = {Pattern Recognition Letters},
  month = jul,
  pages = {1421--1427},
  title = {{Characterization of contour regularities based on the {Levenshtein} edit distance}},
  volume = {32},
  year = {2011}
}
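The entry above relies on the Levenshtein edit distance between Freeman chain codes. As a reading aid, here is a minimal Python sketch of that standard dynamic-programming computation; it is illustrative only and not the paper's implementation.

def levenshtein(a: str, b: str) -> int:
    # prev[j] holds the edit distance between the current prefix of a and b[:j]
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            cost = 0 if ca == cb else 1          # substitution cost
            curr.append(min(prev[j] + 1,         # deletion
                            curr[j - 1] + 1,     # insertion
                            prev[j - 1] + cost)) # substitution or match
        prev = curr
    return prev[-1]

# Example with two short Freeman-chain-code strings.
assert levenshtein("00122", "01222") == 2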
@article{Abreu+Rico-Juan:prl:2013,
  abstract = {This paper presents a new fast algorithm for computing an approximation to the mean of two strings of characters representing a 2D shape and its application to a new Wilson-based editing procedure. The approximate mean is built up by including some symbols from the two original strings. In addition, a greedy approach to this algorithm is studied, which allows us to reduce the time required to compute an approximate mean. The new dataset editing scheme relaxes the criterion for deleting instances proposed by the \emph{Wilson} editing procedure. In practice, not all instances misclassified by their near neighbors are pruned. Instead, an artificial instance is added to the dataset in the hope of successfully classifying the instance in the future. The new artificial instance is the approximated mean of the misclassified sample and its same-class nearest neighbor. Experiments carried out over three widely known databases of contours show that the proposed algorithm performs very well when computing the mean of two strings, and outperforms methods proposed by other authors. In particular, the low computational time required by the heuristic approach makes it very suitable when dealing with long length strings. Results also show that the proposed preprocessing scheme can reduce the classification error in about $83\%$ of trials. There is empirical evidence that using the greedy approximation to compute the approximated mean does not affect the performance of the editing procedure.},
  author = {Abreu, J. and Rico-Juan, J. R.},
  http = {http://dx.doi.org/10.1016/j.patrec.2012.11.019},
  impact_factor = {1.226 - Q2 - JCR},
  journal = {Pattern Recognition Letters},
  month = dec,
  number = {5},
  pages = {496--504},
  title = {{An improved fast edit approach for two-string approximated mean computation applied to OCR}},
  volume = {34},
  year = {2013}
}
@article{Rico-Juan+Inesta:prl:2012,
  abstract = {Some new rank methods to select the best prototypes from a training set are proposed in this paper in order to establish its size according to an external parameter, while maintaining the classification accuracy. The traditional methods that filter the training set in a classification task like editing or condensing have some rules that apply to the set in order to remove outliers or keep some prototypes that help in the classification. In our approach, new voting methods are proposed to compute the prototype probability and help to classify correctly a new sample. This probability is the key to sorting the training set out, so a relevance factor from 0 to 1 is used to select the best candidates for each class whose accumulated probabilities are less than that parameter. This approach makes it possible to select the number of prototypes necessary to maintain or even increase the classification accuracy. The results obtained in different high dimensional databases show that these methods maintain the final error rate while reducing the size of the training set.},
  annote = {Ranking of prototypes by probabilities of belonging to a correct classification},
  author = {Rico-Juan, J. R. and I{\~n}esta, J. M.},
  http = {http://dx.doi.org/10.1016/j.patrec.2011.07.019},
  impact_factor = {1.226 - Q2 - JCR},
  issn = {0167-8655},
  journal = {Pattern Recognition Letters},
  month = feb,
  number = {5},
  pages = {654--660},
  title = {{New rank methods for reducing the size of the training set using the nearest neighbor rule}},
  volume = {33},
  year = {2012}
}
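A hedged sketch of the selection rule described in the abstract above: prototypes are ranked by an estimated probability of contributing to a correct classification, and for each class the best candidates are kept while their accumulated probability mass stays below a relevance factor between 0 and 1. The scoring itself is a placeholder here; the paper's voting methods are not reproduced.

from collections import defaultdict

def select_by_rank(prototypes, scores, labels, relevance=0.5):
    # prototypes: samples; scores: unnormalised per-prototype votes;
    # labels: class of each prototype; relevance: per-class mass budget in [0, 1].
    by_class = defaultdict(list)
    for p, s, c in zip(prototypes, scores, labels):
        by_class[c].append((s, p))
    selected = []
    for c, items in by_class.items():
        items.sort(key=lambda t: t[0], reverse=True)   # best candidates first
        total = sum(s for s, _ in items) or 1.0
        acc = 0.0
        for s, p in items:
            if acc / total >= relevance:               # mass budget exhausted
                break
            selected.append((p, c))
            acc += s
    return selected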
@article{Rico-Juan+Inesta:nc:2014,
  abstract = {The research community related to the human-interaction framework is becoming increasingly more interested in interactive pattern recognition, taking direct advantage of the feedback information provided by the user in each interaction step in order to improve raw performance. The application of this scheme requires learning techniques that are able to adaptively re-train the system and tune it to user behavior and the specific task considered. Traditional static editing methods filter the training set by applying certain rules in order to eliminate outliers or maintain those prototypes that can be beneficial in classification. This paper presents two new adaptive rank methods for selecting the best prototypes from a training set in order to establish its size according to an external parameter that controls the adaptation process, while maintaining the classification accuracy. These methods estimate the probability that each prototype correctly classifies a new sample. This probability is used to sort the training set by relevance in classification. The results show that the proposed methods are able to maintain the error rate while reducing the size of the training set, thus allowing new examples to be learned with a few extra computations.},
  annote = {Ranking of prototypes by probabilities of belonging to a correct classification, incremental version},
  author = {Rico-Juan, J. R. and I{\~n}esta, J. M.},
  http = {http://dx.doi.org/10.1016/j.neucom.2014.01.033},
  impact_factor = {2.083 - Q2 - JCR},
  issn = {0925-2312},
  journal = {Neurocomputing},
  month = aug,
  number = {1},
  pages = {316--324},
  title = {{Adaptive training set reduction for nearest neighbor classification}},
  volume = {138},
  year = {2014}
}
@article{Rico-Juan+Calvo-Zaragoza:nc:2015,
  abstract = {This paper proposes a new feature representation method based on the construction of a Confidence Matrix (CM). This representation consists of posterior probability values provided by several weak classifiers, each one trained and used on a different set of features extracted from the original sample. The CM allows the final classifier to abstract itself from discovering underlying groups of features. In this work the CM is applied to isolated character image recognition, for which several sets of features can be extracted from each sample. Experimentation has shown that the use of the CM permits a significant improvement in accuracy in most cases, while in the remaining ones accuracy stays the same. The results were obtained after experimenting with four well-known corpora, using evolved meta-classifiers with the k-Nearest Neighbor rule as the weak classifier and by applying statistical significance tests.},
  title = {Improving classification using a Confidence Matrix based on weak classifiers applied to {OCR}},
  author = {Rico-Juan, J. R. and Calvo-Zaragoza, J.},
  journal = {Neurocomputing},
  volume = {151},
  pages = {1354--1361},
  year = {2015},
  url = {http://www.sciencedirect.com/science/article/pii/S092523121401443X},
  annote = {Probabilities of weak classifiers as learning features},
  impact_factor = {2.392 - Q1 - JCR},
  issn = {0925-2312}
}
@article{Calvo-Zaragoza+Valero-Mas+Rico-Juan:pr:2015,
  abstract = {Prototype Selection (PS) algorithms allow a faster Nearest Neighbor classification by keeping only the most profitable prototypes of the training set. In turn, these schemes typically lower the performance accuracy. In this work a new strategy for multi-label classification tasks is proposed to solve this accuracy drop without the need of using the whole training set. For that, given a new instance, the PS algorithm is used as a fast recommender system which retrieves the most likely classes. Then, the actual classification is performed only considering the prototypes from the initial training set belonging to the suggested classes. Results show this strategy provides a large set of trade-off solutions which fill the gap between PS-based classification efficiency and conventional kNN accuracy. Furthermore, this scheme is not only able to, at best, reach the performance of conventional kNN with barely a third of the distances computed, but it also outperforms the latter in noisy scenarios, proving to be a much more robust approach.},
  title = {Improving kNN multi-label classification in Prototype Selection scenarios using class proposals},
  annote = {kNNc: kNN extension with c classes recommended by PS},
  author = {Calvo-Zaragoza, J. and Valero-Mas, J. J. and Rico-Juan, J. R. },
  journal = {Pattern Recognition},
  volume = {48},
  number = {5},
  pages = {1608--1622},
  year = {2015},
  url = {http://dx.doi.org/10.1016/j.patcog.2014.11.015},
  doi = {10.1016/j.patcog.2014.11.015},
  impact_factor = {3.096 - Q1 - JCR},
  issn = {0031-3203}
}
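An illustrative sketch of the two-stage scheme from the abstract above: a reduced set obtained by PS acts as a fast recommender that proposes the c most likely classes, and the final kNN decision only considers original prototypes of those classes. Function names, numpy arrays and the Euclidean metric are assumptions for the example.

import numpy as np

def knn_with_class_proposals(x, X_red, y_red, X_full, y_full, c=3, k=5):
    # X_red/y_red: reduced set (numpy arrays); X_full/y_full: original training set
    d_red = np.linalg.norm(X_red - x, axis=1)          # distances to reduced set
    proposed = []
    for i in np.argsort(d_red):                        # closest classes first
        if y_red[i] not in proposed:
            proposed.append(y_red[i])
        if len(proposed) == c:
            break
    mask = np.isin(y_full, proposed)                   # restrict the final search
    d = np.linalg.norm(X_full[mask] - x, axis=1)
    nearest = y_full[mask][np.argsort(d)[:k]]
    values, counts = np.unique(nearest, return_counts=True)
    return values[np.argmax(counts)]                   # majority vote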
@article{Calvo-Zaragoza+Valero-Mas+Rico-Juan:nca:2017,
  abstract = {Data Reduction techniques play a key role in instance-based classification to lower the amount of data to be processed. Among the different existing approaches, Prototype Selection (PS) and Prototype Generation (PG) are the most representative ones. These two families differ in the way the reduced set is obtained from the initial one: while the former aims at selecting the most representative elements from the set, the latter creates new data out of it. Although PG is considered to delimit decision boundaries more efficiently, the operations required are not so well defined in scenarios involving structural data such as strings, trees or graphs. This work studies the possibility of using Dissimilarity Space (DS) methods as an intermediate process for mapping the initial structural representation to a statistical one, thereby allowing the use of PG methods. A comparative experiment over string data is carried out in which our proposal is compared against PS methods in the original space. Results show that the proposed strategy is able to achieve significantly similar results to PS in the initial space, thus standing as a clear alternative to the classic approach, with some additional advantages derived from the DS representation.},
  title = {Prototype Generation on Structural Data using Dissimilarity Space Representation},
  annote = {PG/PS in DS using strings/consecutive points as structural data},
  author = {Calvo-Zaragoza, J. and Valero-Mas, J. J. and Rico-Juan, J. R. },
  journal = {Neural Computing and Applications},
  volume = {28},
  number = {9},
  pages = {2415--2424},
  year = {2017},
  url = {http://dx.doi.org/10.1007/s00521-016-2278-8},
  doi = {10.1007/s00521-016-2278-8},
  impact_factor = {4.213 - Q1 - JCR},
  issn = {0941-0643}
}
@article{Valero-Mas+Calvo-Zaragoza+Rico-Juan:sc:2017,
  abstract = {Prototype Selection is one of the most popular approaches for addressing the low efficiency issue typically found in the well-known $k$-Nearest Neighbour classification rule. These techniques select a representative subset from an original collection of prototypes with the premise of maintaining the same classification accuracy. More recently, rank methods have been proposed as an alternative to develop new selection strategies. Following a certain heuristic, these methods sort the elements of the initial collection according to their relevance and then select the best possible subset by means of a parameter representing the amount of data to maintain. Due to the relative novelty of these methods, their performance and competitiveness against other strategies are still unclear. This work performs an exhaustive experimental study of such methods for prototype selection. A representative collection of both classic and sophisticated algorithms is compared to the aforementioned techniques in a number of datasets, including different levels of induced noise. Results report the remarkable competitiveness of these rank methods as well as their excellent trade-off between prototype reduction and achieved accuracy.},
  title = {An Experimental Study on Rank Methods for Prototype Selection},
  annote = {Our rank methods with noise and 5 databases (USPS[ED], NIST3[ED], HOMUS[DTW], Penbased[HVDM], Letter[HVDM])},
  author = {Valero-Mas, J. J. and Calvo-Zaragoza, J. and Rico-Juan, J. R. },
  journal = {Soft Computing},
  volume = {21},
  number = {19},
  pages = {5703--5715},
  year = {2017},
  url = {http://dx.doi.org/10.1007/s00500-016-2148-4},
  doi = {10.1007/s00500-016-2148-4},
  impact_factor = {2.367 - Q2 - JCR},
  issn = {1432-7643}
}
@article{Calvo-Zaragoza+Valero-Mas+Rico-Juan:sc:2017,
  abstract = {The Nearest Neighbor rule is one of the most considered algorithms for supervised learning because of its simplicity and fair performance in most cases. However, this technique has a number of disadvantages, the most prominent one being its low computational efficiency. This paper presents a strategy to overcome this obstacle in multi-class classification tasks. This strategy proposes the use of Prototype Reduction algorithms that are capable of generating a new training set from the original one to try to gather the same information with fewer samples. Over this reduced set, it is estimated which classes are the closest ones to the input sample. These classes are referred to as \emph{promising classes}. Eventually, classification is performed with the Nearest Neighbor rule over the original training set, but restricted to the promising classes. Our experiments with several datasets and significance tests show that a similar classification accuracy can be obtained compared to using the original training set, with a significantly higher efficiency.},
  title = {Selecting promising classes from generated data for an efficient multi-class Nearest Neighbor classification},
  annote = {kNNc method with PG from another point of view},
  author = {Valero-Mas, J. J. and Calvo-Zaragoza, J. and Rico-Juan, J. R. },
  journal = {Soft Computing},
  volume = {21},
  number = {20},
  pages = {6183--6189},
  year = {2017},
  url = {http://dx.doi.org/10.1007/s00500-016-2176-0},
  doi = {10.1007/s00500-016-2176-0},
  impact_factor = {2.367 - Q2 - 45/132 - JCR},
  issn = {1432-7643}
}
@article{Valero-Mas+Calvo-Zaragoza+Rico-Juan:nc:2016,
  abstract = {In the current Information Age, data production and processing demands are ever increasing. This has motivated the appearance of large-scale distributed information. This phenomenon also applies to Pattern Recognition, meaning that classic and common algorithms, such as the k-Nearest Neighbour, cannot be applied directly. To improve the efficiency of this classifier, Prototype Selection (PS) strategies can be used. Nevertheless, current PS algorithms were not designed to deal with distributed data, and their performance is therefore unknown under these conditions. This work is devoted to carrying out an experimental study on a simulated framework in which PS strategies can be compared under classical conditions as well as those expected in distributed scenarios. Our results report a general behaviour that is degraded as conditions approach more realistic scenarios. However, our experiments also show that some methods are able to achieve a fairly similar performance to that of the non-distributed scenario. Thus, although there is a clear need for developing specific PS methodologies and algorithms for tackling these situations, those that reported a higher robustness against such conditions may be good candidates from which to start.},
  title = {On the suitability of Prototype Selection methods for kNN classification with distributed data},
  author = {Valero-Mas, J. J. and Calvo-Zaragoza, J. and Rico-Juan, J. R.},
  journal = {Neurocomputing},
  volume = {203},
  pages = {150--160},
  year = {2016},
  url = {http://dx.doi.org/10.1016/j.neucom.2016.04.018},
  doi = {10.1016/j.neucom.2016.04.018},
  annote = {PS applied to different numbers of partitions and with different noise levels (0\%, 20\%, 40\%)},
  impact_factor = {2.392 - Q1 - JCR},
  issn = {0925-2312}
}
@article{Rico-Juan+Inesta:paa:2012,
  abstract = {In this paper, a new approach to off-line signature verification is proposed based on two-class classifiers using an ensemble of expert decisions. Different methods to extract sets of local and global features from the target sample are detailed. A normalisation by confidence voting method is also used in order to decrease the final equal error rate (EER). Each set of features is processed by a single expert and, in the other approach proposed, the decisions of the individual classifiers are combined using weighted votes. Experimental results are given using a subcorpus of the large MCYT signature database for random and skilled forgeries. The results show that the weighted combination outperforms the individual classifiers significantly. The best EERs obtained were 6.3\% in the case of skilled forgeries and 2.3\% in the case of random forgeries.},
  annote = {Application of the combination of weak classifiers with probabilities (inverse distance) to signature verification},
  author = {Rico-Juan, J. R. and I{\~n}esta, J. M.},
  impact_factor = {0.814 - Q3 - JCR},
  issn = {1433-7541},
  journal = {Pattern Analysis and Applications},
  month = apr,
  number = {2},
  pages = {113--120},
  title = {{Confidence voting method ensemble applied to off-line signature verification}},
  volume = {15},
  year = {2012}
}
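A loose sketch of the confidence-voting combination described above, with per-expert confidences derived from inverse distances (as hinted in the annote). The exact normalisation used in the paper is not reproduced; this only conveys the general mechanism.

def inverse_distance_confidences(dists_per_class, eps=1e-9):
    # Each expert reports its distance to the closest prototype of each class;
    # smaller distance means larger confidence. Normalise so values sum to one.
    inv = {c: 1.0 / (d + eps) for c, d in dists_per_class.items()}
    total = sum(inv.values())
    return {c: v / total for c, v in inv.items()}

def weighted_vote(experts):
    # experts: list of per-class confidence dicts from the individual classifiers.
    combined = {}
    for conf in experts:
        for c, w in conf.items():
            combined[c] = combined.get(c, 0.0) + w
    return max(combined, key=combined.get)

# Two experts, two classes: their confidences are combined as weighted votes.
e1 = inverse_distance_confidences({"genuine": 0.2, "forgery": 0.8})
e2 = inverse_distance_confidences({"genuine": 0.5, "forgery": 0.4})
print(weighted_vote([e1, e2]))   # -> "genuine"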
@inproceedings{Rico-Juan:EEE:2009,
  abstract = {In this paper, we describe a way to create synchronized presentations for mobile devices using only open-source tools. In the framework of higher education, it is important to provide the students with flexible and interactive resources when the time assigned to laboratory sessions or lectures gets decreased. Nowadays, students often have one or more mobile devices such as mobile phones, smartphones, PDAs (Personal Digital Assistants), etc. This gives teachers the opportunity to create resources for this kind of device. On the other hand, open-source software offers an interesting alternative for creating educational resources, using either a single tool or a combination of them. The main idea here is to describe a procedure to create presentations combining PDF files as slides, audio files with detailed explanations and flash video files (.swf) for showing demos. We describe in detail how to integrate these individual components to create a high quality presentation, based on vectorial components, with small resulting file sizes. It also allows these presentations to be played on mobile devices. In contrast to commercial tools, our approach does not use special interfaces or formats and it allows one to export presentations to formats compatible with future tools. Our proposal also allows one to work with conventional tools to create slides (such as PowerPoint, OpenOffice.org Impress or LaTeX), since the final slides are exported to PDF, and to use standard audio tools to create audio (WAV, OGG and MP3 are supported). Video can be included just by converting the original file to SWF (flash video) format. In order to make use of the educational resources, we just need a mobile device with a web browser and a flash plug-in installed and, therefore, the result can be easily distributed through a web server or as a package that can be stored locally on the device.},
  address = {Las Vegas, Nevada, USA},
  annote = {Audio+video presentations on mobile devices},
  author = {Rico-Juan, J. R.},
  booktitle = {{Proceedings of the 2009 International Conference on e-Learning, e-Business, Enterprise Information Systems and e-Government}},
  month = jul,
  pages = {50--52},
  pdf = {http://www.dlsi.ua.es/~juanra/papers/2009_EEE.pdf},
  publisher = {CSREA Press},
  title = {{Creating synchronised presentations for mobile devices using open source tools}},
  year = {2009}
}
@inproceedings{Abreu+Rico-Juan:IbPRIA:2009,
  abstract = {In this paper, we present a new method for constructing prototypes representing a set of contours encoded by Freeman chain codes. Our method builds new prototypes taking into account similar segments shared between contour instances. The similarity criterion is based on the Levenshtein edit distance definition. We also outline how to apply our method to reduce a data set without significantly affecting its representational power for classification purposes. Experimental results show that our scheme can achieve a compression of about 50\% while the classification error increases only by 0.75\%.},
  address = {P{\'o}voa de Varzim, Portugal},
  annote = {Extraction of regularities from the string edit distance},
  author = {Abreu, J. I. and Rico-Juan, J. R.},
  booktitle = {{Pattern Recognition and Image Analysis. IbPRIA 2009}},
  impact_factor = {Core C},
  month = jun,
  pages = {160--167},
  pdf = {http://www.dlsi.ua.es/~juanra/papers/2009_J\_IbPRIA.pdf},
  publisher = {Springer},
  series = {{Lecture Notes in Computer Science}},
  title = {{Contour regularity extraction based on string edit distance}},
  year = {2009}
}
@inproceedings{Rico-Juan+Carrasco:IADAT:2007,
  abstract = {In this paper, we describe a flexible tool to create synchronized presentations using only open-source tools. In the framework of the new European Credit Transfer System (ECTS), it is even more important to provide the students with flexible and interactive resources as the time assigned to laboratory sessions or lectures gets decreased. Open-source software offers an interesting alternative for creating educational resources, sometimes using a single tool but often using a combination of them. Here, we describe a procedure to create AV presentations combining PDF files (slides), audio files, flash video files (.swf) and flash video streaming (.flv). We describe in detail how to integrate these individual components to automatically create a high quality presentation, that is, one based on vectorial components, with small resulting file sizes. It also allows video or video streaming to be integrated into single slides. In contrast to commercial tools, this tool does not use special interfaces or formats and it allows one to export presentations to formats compatible with other (future) presentation tools. Our tool also allows one to work with traditional tools to create slides (such as PowerPoint, OpenOffice Impress or LaTeX) provided that the final slides are exported to PDF, and to use standard audio tools to create audio (WAV, OGG and MP3 are supported). Video can be included just by converting the original file to SWF (flash video) format or FLV (flash video streaming). In order to make use of the educational resource, we just need a web browser with a flash plug-in installed and, therefore, the result can be easily distributed through a web server, a CD or a DVD.},
  address = {Palma de Mallorca, (Spain)},
  annote = {Article about video presentations},
  author = {Rico-Juan, J. R. and Carrasco, R. C.},
  booktitle = {{IADAT-e2007. 4th IADAT International Conference on Education}},
  pages = {40--43},
  publisher = {International Association for the Development of Advances in Technology (IADAT)},
  title = {{How to create an efficient audiovisual slide presenter}},
  volume = {1},
  year = {2007}
}
@inproceedings{Rico-Juan+Carrasco:IATED:2007,
  abstract = {In this paper, we describe a flexible approach to creating video presentations using open source tools. In the new ECTS framework the time that the student spends in a laboratory or in a classroom is reduced and, therefore, it is important to provide students with materials that are more flexible and interactive than classic electronic papers or books. Open source programs are a good alternative for creating educational resources. Often it is not possible to produce the whole video presentation with a single tool, but it is possible to choose different tools to do so. We implement a method to create video presentations from PDF files (slides), audio files and flash (.swf) video files. We describe in detail how to integrate these individual components to automatically create a high quality video presentation with small output files. With commercial tools, we may only use the special interfaces and formats supported by the tool. As a consequence, we cannot export every presentation to files compatible with other presentation tools. So, if we cannot export our previous presentations, it is difficult to change to a new, better tool. The method described here solves these problems. It allows us to work with traditional tools to create slides (PowerPoint, OpenOffice Impress or LaTeX), provided that we export the final slides to PDF. We can use any audio tool to create audio for each slide in different formats (WAV, OGG and MP3 are supported). If we want to include a video we need to convert it to SWF (Shockwave Flash) format. The result requires only a web browser with a Flash plug-in. So, we can distribute the result on standard media such as a web server, a CD or a DVD.},
  address = {Valencia, (Spain)},
  annote = {Article about video presentations},
  author = {Rico-Juan, J. R. and Carrasco, R. C.},
  booktitle = {{International Technology, Education and Development Conference (INTED)}},
  http = {http://www.dlsi.ua.es/~juanra/papers/Inted2007/},
  pages = {30--31},
  publisher = {International Association of Technology, Education and Development (IATED)},
  title = {{How to do easy video presentations using open source tools}},
  volume = {1},
  year = {2007}
}
@inbook{Rico-Juan+Inesta:PR_PDA:2006,
  abstract = {In this paper a new algorithm to describe a binary image as an ordered vector set is presented. An extension of the string edit distance is defined for computing it between a pair of ordered sets of vectors. This edit distance can be used in nearest neighbor classification tasks. The advantages of this method applied to isolated handwritten character classification are shown, compared to similar methods based on string or tree representations of the binary image.},
  annote = {Book of the Pattern Recognition thematic network},
  author = {Rico-Juan, J. R. and I{\~n}esta, J. M.},
  booktitle = {{Pattern Recognition; Progress, Directions and Applications}},
  chapter = {4},
  title = {An edit distance for ordered vector sets with application to character recognition},
  editor = {Pla, F. and Radeva, P. and Vitria, J.},
  month = mar,
  pages = {54--62},
  ps = {http://www.dlsi.ua.es/~juanra/papers/2006_ptsdom.ps},
  publisher = {Computer Vision Center},
  volume = {1},
  year = {2006}
}
@inproceedings{Rico-Juan+Inesta:2006,
  abstract = {Digital contours in a binary image can be described as an ordered vector set. In this paper an extension of the string edit distance is defined for its computation between a pair of ordered sets of vectors. This way, the differences between shapes can be computed in terms of editing costs. In order to achieve efficiency, a dominant point detection algorithm should be applied, removing redundant data before coding shapes into vectors. This edit distance can be used in nearest neighbour classification tasks. The advantages of this method applied to isolated handwritten character classification are shown, compared to similar methods based on string or tree representations of the binary image.},
  address = {Hong Kong, China},
  annote = {SSPR 2006, Hong Kong},
  author = {Rico-Juan, J. R. and I{\~n}esta, J. M.},
  booktitle = {{Structural, Syntactic, and Statistical Pattern Recognition}},
  editor = {Yeung, D. and Kwok, J. T. and Fred, A. and Roli, F. and {de Ridder}, D.},
  impact_factor = {Core A},
  month = aug,
  number = {4109},
  pages = {200--207},
  pdf = {http://www.dlsi.ua.es/~juanra/papers/2006_ptsdom.pdf},
  publisher = {Springer},
  series = {{Lecture Notes in Computer Science}},
  title = {{Edit Distance for Ordered Vector Sets: A Case of Study}},
  year = {2006}
}
@inproceedings{Rico-Juan+Inesta:2007,
  abstract = {In this work, a normalisation of the weights used for combining classifier decisions, based on the Euclidean similarity distance, is presented. This normalisation is used by the confidence voting methods to decrease the final error rate in an OCR task. Different features are extracted from the characters. Each set of features is processed by a single classifier and then the decisions of the individual classifiers are combined using weighted votes, applying different techniques. The error rates obtained are as good as or slightly better than those obtained using Freeman chain codes as the contour representation and the string edit distance as the similarity measure, but the complexity and classification time decrease dramatically.},
  address = {Wroclaw, Poland},
  annote = {CORES 2007, Wroclaw},
  author = {Rico-Juan, J. R. and I{\~n}esta, J. M.},
  booktitle = {{Computer Recognition Systems 2}},
  editor = {Kurzynski, M. and Puchala, E. and Wozniak, M. and Zolnierek, A.},
  month = oct,
  number = {45},
  pages = {405--412},
  pdf = {http://www.dlsi.ua.es/~juanra/papers/2007_combinacionClasificadores.pdf},
  publisher = {Springer},
  series = {{Advances in Soft Computing}},
  title = {{Normalisation of Confidence Voting Methods Applied to a Fast Handwritten OCR Classification}},
  year = {2007}
}
@inproceedings{Rico-Juan+Mico:2004,
  abstract = {When objects are represented by curves in a plane, highly useful information is conveyed by significant points. In this paper, we compare the use of different mobile windows to extract dominant points of handwritten characters. The error rate and classification time using an edit distance based nearest neighbour search algorithm are compared for two different cases: string and tree representation.},
  address = {Porto, Portugal},
  annote = {Conference in Porto, October 2004},
  author = {Rico-Juan, J. R. and Mic{\'o}, L.},
  booktitle = {{International Conference on Image Analysis and Recognition}},
  editor = {Campilho, A. and Kamel, M.},
  month = jun,
  number = {3211},
  pages = {440--446},
  pdf = {http://www.dlsi.ua.es/~juanra/papers/2004_nist26.r.pdf},
  publisher = {Springer},
  series = {{Lecture Notes in Computer Science}},
  title = {{Finding significant points for a handwritten classification task}},
  year = {2004}
}
@article{Rico-Juan+Calera-Rubio+Carrasco:pr:2005,
  abstract = {In this paper, we describe a generalization of k-gram models for stochastic tree languages. These models are based on the k-testable class, a subclass of the languages recognizable by ascending tree automata. One of the advantages of this approach is that the probabilistic model can be updated in an incremental fashion. Another feature is that backing-off schemes can be defined. As an illustration of their applicability, they have been used to compress tree data files at a better rate than string-based methods.},
  annote = {Main contribution of the thesis},
  author = {Rico-Juan, J. R. and Calera-Rubio, J. and Carrasco, R. C.},
  impact_factor = {2.607 - Q1 - JCR},
  journal = {Pattern Recognition},
  number = {9},
  pages = {1420--1430},
  title = {{Smoothing and Compression with Stochastic $k$-testable Tree Languages}},
  volume = {38},
  year = {2005}
}
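A toy Python sketch of the k-gram-style estimation on trees that the entry above generalizes, for the simplest case k = 2: each node contributes the event "label expands into the labels of its children", and relative frequencies yield an incrementally updatable probabilistic model. The nested-tuple tree encoding is an assumption for illustration; the smoothing and backing-off schemes of the paper are omitted.

from collections import Counter

def forks(tree):
    # tree = (label, [subtrees]); yield one (label, child-label tuple) event per node
    label, children = tree
    yield (label, tuple(c[0] for c in children))
    for c in children:
        yield from forks(c)

counts = Counter()
for t in [("S", [("a", []), ("S", [("a", []), ("b", [])])]),
          ("S", [("a", []), ("b", [])])]:
    counts.update(forks(t))        # incremental update, one tree at a time

total_by_root = Counter()
for (root, kids), n in counts.items():
    total_by_root[root] += n
probs = {ev: n / total_by_root[ev[0]] for ev, n in counts.items()}
print(probs)   # e.g. P(S -> a S) = 1/3, P(S -> a b) = 2/3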
@inproceedings{Rico-Juan+Mico:2003,
  abstract = {In pattern recognition there is a variety of applications where patterns are classified using the edit distance. In this paper we present some results comparing the use of tree and string edit distances in a handwritten character recognition task. Experiments with different numbers of classes and classifiers are carried out.},
  address = {Puerto Andratx, Mallorca, Spain},
  annote = {Comparison of the slack between the AESA and LAESA techniques on trees and strings},
  author = {Rico-Juan, J. R. and Mic{\'o}, L.},
  booktitle = {{Pattern Recognition and Image Analysis}},
  editor = {Goos, G. and Hartmanis, J. and {van Leeuwen}, J.},
  month = jun,
  number = {2652},
  pages = {821--828},
  pdf = {http://www.dlsi.ua.es/~juanra/papers/2003_tree\_string.pdf},
  publisher = {Springer},
  series = {{Lecture Notes in Computer Science}},
  title = {{Some Results about the Use of Tree/String Edit Distances in a Nearest Neighbour Classification Task}},
  year = {2003}
}
@article{Rico-Juan+Mico:prl:2003,
  abstract = {Although the success rate of handwritten character recognition using a nearest neighbour technique together with the edit distance is satisfactory, the exhaustive search is expensive. Some fast methods such as AESA and LAESA have been proposed to find nearest neighbours in metric spaces. The average number of distances computed by these algorithms is very low and does not depend on the number of prototypes in the training set. In this paper, we compare the behaviour of these algorithms when string and tree edit distances are used.},
  annote = {Comparison between the AESA and LAESA techniques on trees and strings},
  author = {Rico-Juan, J. R. and Mic{\'o}, L.},
  impact_factor = {0.809 - Q3 - JCR},
  journal = {Pattern Recognition Letters},
  pages = {1427--1436},
  pdf = {http://www.dlsi.ua.es/~juanra/papers/2003_dArbolxAESA.pdf},
  title = {{Comparison of {AESA} and {LAESA} search algorithms using string and tree edit distances}},
  volume = {24},
  number = {9},
  year = {2003}
}
@article{Carrasco+Rico-Juan:prl:2003,
  abstract = {We describe a general approach to compute a similarity measure between distributions generated by probabilistic tree automata that may be used in a number of applications in the pattern recognition field. In particular, we show how this similarity can be computed for families of structured (XML) documents. In this case, the use of regular expressions to specify the right part of the expansion rules adds some complexity to the task.},
  annote = {Classification of XML texts using tree automata},
  author = {Carrasco, R. C. and Rico-Juan, J. R.},
  impact_factor = {1.611 - Q1 - JCR},
  journal = {Pattern Recognition},
  title = {{A similarity between probabilistic tree languages: application to {XML} document families}},
  volume = {36},
  number = {9},
  year = {2003}
}
@inproceedings{Rico-Juan+Calera-Rubio:PRIS:2002,
  abstract = {Although the rate of correctly classified prototypes using the tree edit distance is satisfactory, exhaustive classification is expensive. Some fast methods such as AESA and LAESA have been proposed to find nearest neighbours in metric spaces. The average number of distances computed by these algorithms does not depend on the number of prototypes. In this paper we apply these classification algorithms to the task of handwritten character recognition and obtain a low average error rate (2\%) and fast classification.},
  address = {Alicante (Spain)},
  annote = {AESA and LAESA with tree-edit-distance},
  author = {Rico-Juan, J. R. and Calera-Rubio, J.},
  booktitle = {{Pattern Recognition in Information Systems}},
  editor = {I{\~n}esta, J. M. and Mic{\'o}, L.},
  pages = {326--335},
  pdf = {http://www.dlsi.ua.es/~juanra/papers/2002PRIS_TreeEditFastNN.pdf},
  publisher = {ICEIS PRESS},
  title = {{Evaluation of handwritten character recognizers using tree-edit-distance and fast nearest neighbour search}},
  year = {2002}
}
@inproceedings{Rico-Juan+Calera-Rubio+Carrasco:ICGI:2002,
  abstract = {In this paper, we present a natural generalization of $k$-gram models for tree stochastic languages based on the $k$-testable class. In this class of models, frequencies are estimated for a probabilistic regular tree grammar which is bottom-up deterministic. One of the advantages of this approach is that the model can be updated in an incremental fashion. This method is an alternative to costly learning algorithms (such as inside-outside-based methods) or algorithms that require larger samples (such as many state merging/splitting methods).},
  address = {Amsterdam (Nederland)},
  annote = {k-treegrams},
  author = {Rico-Juan, J. R. and Calera-Rubio, J. and Carrasco, R. C.},
  booktitle = {{Grammatical Inference: Algorithms and Applications. ICGI 2002}},
  editor = {Adriaans, P. and Fernau, H. and {van Zaanen}, M.},
  month = sep,
  number = {2484},
  pages = {199--212},
  publisher = {Springer-Verlag},
  series = {{Lecture Notes in Artificial Intelligence}},
  title = {{Stochastic k-testable Tree Languages and Applications}},
  year = {2002}
}
@inproceedings{Rico-Juan+Calera-Rubio+Carrasco:IAPR:2000,
  abstract = {In many applications, objects are represented by a collection of unorganized points that sample the surface of the object. In such cases, an efficient way of storing this information is of interest. In this paper we present an arithmetic compression scheme that uses a tree representation of the data set and allows for better compression rates than general-purpose methods.},
  address = {Berlin},
  annote = {3D point compression},
  author = {Rico-Juan, J. R. and Calera-Rubio, J. and Carrasco, R. C.},
  booktitle = {{Advances in Pattern Recognition}},
  editor = {Ferri, F. J. and I{\~n}esta, J. M. and Amin, A. and Pudil, P.},
  pages = {457--461},
  publisher = {Springer-Verlag},
  series = {{Lecture Notes in Computer Science}},
  title = {{Lossless compression of surfaces described as points}},
  volume = {1876},
  year = {2000}
}
@inproceedings{Rico-Juan+Calera-Rubio+Carrasco:ICGI:2000,
  abstract = {In this paper, we present a natural generalization of $k$-gram models for tree stochastic languages based on the $k$-testable class. In this class of models, frequencies are estimated for a probabilistic regular tree grammar which is bottom-up deterministic. One of the advantages of this approach is that the model can be updated in an incremental fashion. This method is an alternative to costly learning algorithms (such as inside-outside-based methods) or algorithms that require larger samples (such as many state merging/splitting methods).},
  address = {Lisboa (Portugal)},
  annote = {k-treegrams},
  author = {Rico-Juan, J. R. and Calera-Rubio, J. and Carrasco, R. C.},
  booktitle = {{Grammatical Inference: Algorithms and Applications. 5th International Colloquium, ICGI 2000}},
  editor = {Oliveira, A. L.},
  month = sep,
  pages = {221--228},
  pdf = {ftp://altea.dlsi.ua.es/people/juanra/papers/ICGI2000.pdf},
  ps = {ftp://altea.dlsi.ua.es/people/carrasco/papers/ICGI2000.ps.gz},
  publisher = {Springer-Verlag},
  series = {{Lecture Notes in Computer Science}},
  title = {{Probabilistic k-testable tree-language}},
  volume = {1891},
  year = {2000}
}
@inproceedings{Rico-Juan:pria:1999,
  abstract = {This paper describes a geometric approach to the difficult off-line cursive handwritten word recognition problem. The method extracts and classifies feature trees from isolated handwritten words, measuring the distance between two trees.},
  address = {Bilbao (Spain)},
  annote = {Distance between trees. Classification. Off-line OCR},
  author = {Rico-Juan, J. R.},
  booktitle = {{Pattern Recognition and Image Analysis: Proceedings of the VII Symposium Nacional de Reconocimiento de Formas y An{\'a}lisis de Im{\'a}genes}},
  editor = {Torres, M. I. and Sanfeliu, A.},
  month = may,
  pages = {15--16},
  pdf = {ftp://altea.dlsi.ua.es/people/juanra/papers/snrfai99.pdf},
  ps = {ftp://altea.dlsi.ua.es/people/juanra/papers/snrfai99.ps.gz},
  title = {{Off-line cursive handwritten word recognition based on tree extraction and an optimized classification distance}},
  volume = {3},
  year = {1999}
}
@inproceedings{Abreu+RicoJuan:2010,
  abstract = {This paper presents a new fast algorithm to compute an approximation to the median between two strings of characters representing a 2D shape, and its application to a new classification scheme to decrease its error rate. The median string results from the application of certain edit operations from the minimum cost edit sequence to one of the original strings. The new dataset editing scheme relaxes the criterion for deleting instances proposed by the Wilson editing procedure. In practice, not all instances misclassified by their near neighbors are pruned. Instead, an artificial instance is added to the dataset in the hope of successfully classifying the instance in the future. The new artificial instance is the median of the misclassified sample and its same-class nearest neighbor. The experiments over two widely used datasets of handwritten characters show this preprocessing scheme can reduce the classification error in about 78\% of trials.},
  author = {Rico-Juan, J. R. and Abreu, J. I.},
  booktitle = {{Structural, Syntactic, and Statistical Pattern Recognition}},
  editor = {Hancock, E. R. and Wilson, R. C. and Windeatt, T. and Ulusoy, I. and Escolano, F.},
  impact_factor = {Core A},
  isbn = {978-3-642-14979-5},
  location = {Cesme, Izmir, Turkey},
  month = aug,
  number = {6218},
  pages = {748--756},
  publisher = {Springer},
  series = {{Lecture Notes in Computer Science}},
  title = {{A new editing scheme based on a fast two-string median computation applied to OCR}},
  year = {2010}
}
@article{Abreu+Rico-Juan:prl:2014,
  abstract = {This paper presents a new algorithm that can be used to compute an approximation to the median of a set of strings. The approximate median is obtained through the successive improvements of a partial solution. The edit distance from the partial solution to all the strings in the set is computed in each iteration, thus accounting for the frequency of each of the edit operations in all the positions of the approximate median. A goodness index for edit operations is later computed by multiplying their frequency by the cost. Each operation is tested, starting from that with the highest index, in order to verify whether applying it to the partial solution leads to an improvement. If successful, a new iteration begins from the new approximate median. The algorithm finishes when all the operations have been examined without a better solution being found. Comparative experiments involving Freeman chain codes encoding 2D shapes and the Copenhagen chromosome database show that the quality of the approximate median string is similar to benchmark approaches but achieves a much faster convergence.},
  author = {Abreu, J. and Rico-Juan, J. R.},
  impact_factor = {1.551 - Q2 - JCR},
  journal = {Pattern Recognition Letters},
  month = jan,
  pages = {74--80},
  title = {{A New Iterative Algorithm for Computing a Quality Approximated Median of Strings based on Edit Operations}},
  volume = {36},
  year = {2014}
}
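A greatly simplified sketch of the successive-improvement idea in the entry above: perturb the current candidate with single edit operations and keep any change that lowers the summed edit distance to the set. The paper ranks operations by a frequency-times-cost goodness index; this sketch just tries candidates exhaustively (the levenshtein function sketched earlier in this file can be passed in).

def approx_median(strings, alphabet, levenshtein):
    # Start from the set string with the lowest summed distance to the rest.
    best = min(strings, key=lambda s: sum(levenshtein(s, t) for t in strings))
    best_cost = sum(levenshtein(best, t) for t in strings)
    improved = True
    while improved:
        improved = False
        candidates = []
        for i in range(len(best) + 1):
            for a in alphabet:
                candidates.append(best[:i] + a + best[i:])       # insertion
                if i < len(best):
                    candidates.append(best[:i] + a + best[i+1:]) # substitution
            if i < len(best):
                candidates.append(best[:i] + best[i+1:])          # deletion
        for cand in candidates:
            cost = sum(levenshtein(cand, t) for t in strings)
            if cost < best_cost:                                  # first improvement
                best, best_cost, improved = cand, cost, True
                break
    return best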
@article{Rico-Juan+Gallego+Calvo-Zaragoza+Valero-Mas:pr:2018,
  abstract = {In the education context, open-ended works generally entail a series of benefits, such as the possibility of developing original ideas and a more productive learning process for the student, as compared to closed-answer activities. Nevertheless, such works impose a significant correction workload on the teacher, in contrast to the latter, which can be self-corrected. Furthermore, this workload becomes intractable with large groups of students. In order to maintain the advantages of open-ended works with a reasonable amount of correction effort, this article proposes a novel methodology: students perform the corrections using a rubric (closed Likert scale) as a guideline in a peer-review fashion; then, their markings are automatically analyzed with statistical tools to detect possible biased scorings; finally, in the event the statistical analysis detects a biased case, the teacher is required to intervene to manually correct the assignment. This methodology has been tested on two different assignments with two heterogeneous groups of people to assess the robustness and reliability of the proposal. As a result, we obtain values over 95\% in the confidence of the intra-class correlation test (ICC) between the grades computed by our proposal and those directly resulting from the manual correction of the teacher. These figures confirm that the evaluation obtained with the proposed methodology is statistically similar to that of the manual correction of the teacher, with a remarkable decrease in terms of effort.},
  title = {Statistical semi-supervised system for grading multiple peer-reviewed open-ended works},
  annote = {Peer review with outlier detection},
  author = {Rico-Juan, J. R. and Gallego, A. J. and Valero-Mas, J. J. and Calvo-Zaragoza, J.},
  journal = {Computers & Education},
  volume = {126},
  number = {1},
  pages = {264--282},
  year = {2018},
  url = {https://doi.org/10.1016/j.compedu.2018.07.017},
  doi = {10.1016/j.compedu.2018.07.017},
  month = nov,
  impact_factor = {4.538 - Q1 - JCR},
  issn = {0360-1315}
}
@article{Calvo-Zaragoza+Valero-Mas+Rico-Juan:pr:2018,
  abstract = {While standing as one of the most widely considered and successful supervised classification algorithms, the $k$-Nearest Neighbor (kNN) classifier generally exhibits poor efficiency due to being an instance-based method. In this sense, Approximated Similarity Search (ASS) stands as a possible alternative with which to improve those efficiency issues at the expense of typically lowering the performance of the classifier. In this paper we take as our initial point an ASS strategy based on clustering. We then improve its performance by solving issues related to instances located close to the cluster boundaries, by enlarging their size and considering the use of Deep Neural Networks for learning a suitable representation for the classification task at issue. Results using a collection of eight different datasets show that the combined use of these two strategies entails a significant improvement in accuracy performance, with a considerable reduction in the number of distances needed to classify a sample in comparison to the basic kNN rule.},
  title = {Clustering-based k-Nearest Neighbor Classification for Large-Scale Data with Neural Codes Representation},
  annote = {kNNbc: neural codes for classification using precomputed clusters},
  author = {Gallego, A. J. and Calvo-Zaragoza, J. and Valero-Mas, J. J. and Rico-Juan, J. R.},
  journal = {Pattern Recognition},
  volume = {74},
  number = {1},
  pages = {531--543},
  year = {2018},
  url = {https://doi.org/10.1016/j.patcog.2017.09.038},
  doi = {10.1016/j.patcog.2017.09.038},
  impact_factor = {5.898 - Q1 - 14/134 - JCR},
  issn = {0031-3203}
}
@article{Castellanos+Valero-Mas+Calvo-Zaragoza+Rico-Juan:prl:2018,
  abstract = {Imbalanced data is a typical problem in the supervised classification field, which occurs when the different classes are not equally represented. This fact typically results in the classifier biasing its performance towards the class representing the majority of the elements. Many methods have been proposed to alleviate this scenario, yet all of them assume that data is represented as feature vectors. In this paper we propose a strategy to balance a dataset whose samples are encoded as strings. Our approach is based on adapting the well-known Synthetic Minority Over-sampling Technique (SMOTE) algorithm to the string space. More precisely, data generation is achieved with an iterative approach to create artificial strings within the segment between two given samples of the training set. Results with several datasets and imbalance ratios show that the proposed strategy properly deals with the problem in all cases considered.},
  title = {Oversampling imbalanced data in the string space},
  annote = {SMOTE applied to strings},
  author = {Castellanos, F. J. and Valero-Mas, J. J. and Calvo-Zaragoza, J. and Rico-Juan, J. R.},
  journal = {Pattern Recognition Letters},
  volume = {103},
  pages = {32--38},
  year = {2018},
  url = {https://doi.org/10.1016/j.patrec.2018.01.003},
  doi = {10.1016/j.patrec.2018.01.003},
  impact_factor = {1.995 - Q2 - JCR},
  issn = {0167-8655}
}
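A hedged sketch of generating a synthetic string "between" two same-class samples, in the spirit of the string-space SMOTE described above: compute a minimal edit script from s to t and apply only a fraction of its operations. The backtrace details and the fraction parameter are illustrative choices, not the paper's exact procedure.

def edit_script(s, t):
    # Standard Levenshtein DP table followed by a backtrace that collects
    # the operations needed to turn s into t (positions refer to s).
    n, m = len(s), len(t)
    d = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        d[i][0] = i
    for j in range(m + 1):
        d[0][j] = j
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            c = 0 if s[i-1] == t[j-1] else 1
            d[i][j] = min(d[i-1][j] + 1, d[i][j-1] + 1, d[i-1][j-1] + c)
    ops, i, j = [], n, m
    while i > 0 or j > 0:
        if i > 0 and j > 0 and d[i][j] == d[i-1][j-1] + (s[i-1] != t[j-1]):
            if s[i-1] != t[j-1]:
                ops.append(("sub", i - 1, t[j-1]))
            i, j = i - 1, j - 1
        elif i > 0 and d[i][j] == d[i-1][j] + 1:
            ops.append(("del", i - 1, None))
            i -= 1
        else:
            ops.append(("ins", i, t[j-1]))
            j -= 1
    return ops

def between(s, t, fraction=0.5):
    # Keep a fraction of the edit operations; apply from right to left so
    # that earlier indices into the original string stay valid.
    ops = edit_script(s, t)
    keep = ops[: int(len(ops) * fraction)]
    out = list(s)
    for op, pos, ch in sorted(keep, key=lambda o: -o[1]):
        if op == "sub":
            out[pos] = ch
        elif op == "del":
            del out[pos]
        else:
            out.insert(pos, ch)
    return "".join(out)

print(between("00122", "01222"))   # an intermediate string such as "001222"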
@inproceedings{Calvo-Zaragoza+Valero-Mas+Rico-Juan,
  title = {Recognition of Handwritten Music Symbols using Meta-features Obtained from Weak Classifiers based on Nearest Neighbor.},
  author = {Calvo-Zaragoza, Jorge and Valero-Mas, Jose J and Rico-Juan, Juan Ram{\'o}n},
  booktitle = {ICPRAM},
  pages = {96--104},
  year = {2017},
  url = {https://doi.org/10.5220/0006120200960104},
  doi = {10.5220/0006120200960104},
  isbn = {978-989-758-222-6}
}
@article{Calvo-Zaragoza+Rico-Juan+Gallego:soco:2020,
  abstract = {Data augmentation has become a standard step to improve the predictive power and robustness of Convolutional Neural Networks by means of the synthetic generation of new samples depicting different deformations. This step has been traditionally considered to improve the network at the training stage. In this work, however, we study the use of data augmentation at classification time. That is, the test sample is augmented, following the same procedure considered for training, and the decision is taken with an ensemble prediction over all these samples. We present comprehensive experimentation with several datasets and ensemble decisions, considering a rather generic data augmentation procedure. Our results show that performing this step is able to boost the original classification, even when the room for improvement is limited.},
  title = {Ensemble classification from deep predictions with test data augmentation},
  annote = {Ensemble classification using test data augmentation},
  author = {Calvo-Zaragoza, J. and Rico-Juan, J. R. and Gallego, A. J.},
  journal = {Soft Computing},
  volume = {24},
  number = {2},
  pages = {1423--1433},
  year = {2020},
  url = {https://doi.org/10.1007/s00500-019-03976-7},
  doi = {10.1007/s00500-019-03976-7},
  impact_factor = {3.643 - Q2 - 49/140 - JCR},
  issn = {1432-7643}
}
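A minimal sketch of the test-time augmentation ensemble studied above: the test sample is augmented with the same policy used for training and the per-class predictions are averaged. Here 'model' and 'augment' are placeholders for any classifier exposing per-class probabilities and any deformation routine, both assumptions of this sketch.

import numpy as np

def predict_with_tta(model, augment, x, n_aug=10):
    views = [x] + [augment(x) for _ in range(n_aug)]   # original plus deformations
    probs = np.stack([model.predict_proba(v) for v in views])
    return int(np.argmax(probs.mean(axis=0)))          # ensemble (average) decision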
@article{Rico-Juan+Gallego+Calvo-Zaragoza:cc:2019,
  abstract = {The use of peer assessment for open-ended activities has advantages for both teachers and students. Teachers might reduce the workload of the correction process and students achieve a better understanding of the subject by evaluating the activities of their peers. In order to ease the process, it is advisable to provide the students with a rubric with which to perform the assessment of their peers; however, restricting them to providing only numerical scores is detrimental, as it prevents valuable feedback from being given to other peers. Since this assessment produces two modalities of the same evaluation, namely a numerical score and textual feedback, it is possible to apply automatic techniques to detect inconsistencies in the evaluation, thus minimizing the teachers' workload for supervising the whole process. This paper proposes a machine learning approach for the detection of such inconsistencies. To this end, we consider two different approaches, each of which is tested with different algorithms, in order to both evaluate the approach itself and find appropriate models to make it successful. The experiments carried out with 4 groups of students and 2 types of activities show that the proposed approach is able to yield reliable results, thus representing a valuable approach for ensuring a fair operation of the peer assessment process.},
  title = {Automatic detection of inconsistencies between numerical scores and textual feedback in peer-assessment processes with machine learning},
  annote = {Detection of inconsistencies between numerical grades and textual comments},
  author = {Rico-Juan, J. R. and Gallego, A. J. and Calvo-Zaragoza, J.},
  journal = {Computers & Education},
  volume = {140},
  number = {103609},
  year = {2019},
  url = {https://doi.org/10.1016/j.compedu.2019.103609},
  doi = {10.1016/j.compedu.2019.103609},
  month = jun,
  impact_factor = {4.538 - Q1 - JCR},
  issn = {0360-1315}
}
@article{Rico-Juan+Valero-Mas+Calvo-Zaragoza:cc:2019,
  abstract = {The k-nearest neighbour rule is commonly considered for classification tasks given its straightforward implementation and good performance in many applications. However, its efficiency represents an obstacle in real-case scenarios because the classification requires computing a distance to every single prototype of the training set. Prototype Selection (PS) is a typical approach to alleviate this problem, which focuses on reducing the size of the training set by selecting the most interesting prototypes. In this context, rank methods have been postulated as a good solution: following some heuristics, these methods perform an ordering of the prototypes according to their relevance in the classification task, which is then used to select the most relevant ones. This work presents a significant improvement of existing rank methods by proposing two extensions: i) a greater robustness against noise at label level by considering the parameter `k' of the classification in the selection process; and ii) a new parameter-free rule to select the prototypes once they have been ordered. The experiments performed in different scenarios and datasets demonstrate the goodness of these extensions. Also, it is reported that the new full approach is competitive with respect to existing PS algorithms.},
  title = {Extensions to rank-based prototype selection in k-Nearest Neighbour classification},
  annote = {Extension of FE and NE to APS},
  author = {Rico-Juan, J. R. and Valero-Mas, J. J. and Calvo-Zaragoza, J.},
  journal = {Applied Soft Computing},
  volume = {85},
  number = {105803},
  year = {2019},
  url = {https://doi.org/10.1016/j.asoc.2019.105803},
  doi = {10.1016/j.asoc.2019.105803},
  month = dec,
  impact_factor = {4.873 - Q1 - JCR},
  issn = {1568-4946}
}
@article{Gallego+Calvo-Zaragoza+Rico-Juan:IEEEAccess:2020,
  abstract = {The increasing consideration of Convolutional Neural Networks (CNN) has not prevented the use of the k-Nearest Neighbor (kNN) method. In fact, a hybrid CNN-kNN approach is an interesting option in which the network specializes in feature extraction through its activations (Neural Codes), while the kNN has the advantage of performing a retrieval by means of similarity. However, this hybrid approach also has the disadvantages of the kNN search, and especially its high computational cost which is, in principle, undesirable for large-scale data. In this paper, we present the first comprehensive study of efficient kNN search algorithms using this hybrid CNN-kNN approach. This has been done by considering up to 16 different algorithms, each of which is evaluated with a different parametrization, in 7 datasets of heterogeneous composition. Our results show that no single algorithm is capable of covering all aspects, but rather that each family of algorithms is better suited to specific aspects of the problem. This signifies that Fast Similarity Search algorithms maintain their performance, but do not reduce the cost as much as the Data Reduction family does. In turn, the Approximated Similarity Search family is postulated as a good option when attempting to balance accuracy and efficiency. The experiments also suggest that considering statistical transformation algorithms such as Linear Discriminant Analysis might be useful in certain cases.},
  title = {Insights into efficient k-Nearest Neighbor classification with Convolutional Neural Codes},
  annote = {Detailed study of embeddings and fast kNN search algorithms on different databases},
  author = {Gallego, A. J. and Calvo-Zaragoza, J. and Rico-Juan, J. R.},
  journal = {IEEE Access},
  volume = {8},
  pages = {99312--99326},
  year = {2020},
  url = {https://ieeexplore.ieee.org/document/9099517},
  doi = {10.1109/ACCESS.2020.2997387},
  month = may,
  impact_factor = {3.367 - Q2 - 65/162 - JCR (Open Access)},
  issn = {2169-3536}
}
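
As a flavour of the hybrid CNN-kNN pipeline studied above, the following sketch truncates a pretrained network to obtain neural codes and runs kNN on them. The choice of resnet18 and the assumed input shape are illustrative, not the paper's setup.

  # Sketch of the hybrid CNN-kNN idea: use a truncated pretrained CNN as a
  # feature extractor ("neural codes"), then classify by kNN on those codes.
  import torch
  import torchvision.models as models
  from sklearn.neighbors import KNeighborsClassifier

  def neural_codes(images):                   # images: (N, 3, 224, 224) tensor
      cnn = models.resnet18(weights="DEFAULT")
      cnn.fc = torch.nn.Identity()            # drop the classification head
      cnn.eval()
      with torch.no_grad():
          return cnn(images).numpy()          # (N, 512) activations

  def fit_knn(train_images, train_labels, k=5):
      codes = neural_codes(train_images)
      return KNeighborsClassifier(n_neighbors=k).fit(codes, train_labels)
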
@article{Rico-Juan+Valero-Mas+Inesta:ASOC:2020,
  abstract = {Pattern Recognition tasks in the structural domain generally exhibit high accuracy results, but their time efficiency is quite low. Furthermore, this low performance is more pronounced when dealing with instance-based classifiers, since, for each query, the entire corpus must be evaluated to find the closest prototype. In this work we address this efficiency issue for the Nearest Neighbor classifier when data are encoded as two-dimensional code sequences, and more precisely strings and sequences of vectors. For this, a set of bounds is proposed in the distance metric that avoid the calculation of unnecessary distances. Results obtained prove the effectiveness of the proposal as it reduces the classification time in percentages between 80\% and 90\% for string representations and between 60\% and 80\% for data codified as sequences of vectors with respect to their corresponding non-optimized version of the classifier.},
  title = {Bounding Edit Distance for similarity-based sequence classification on Structural Pattern Recognition},
  author = {Rico-Juan, Juan R. and Valero-Mas, José J. and Iñesta, José M.},
  journal = {Applied Soft Computing},
  volume = {97},
  pages = {106778},
  year = {2020},
  impact_factor = {6.725 - Q1 - 11/112 - JCR},
  publisher = {Elsevier}
}
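
The bounding idea above can be illustrated with the classical length-difference lower bound on the Levenshtein distance; the paper's own, tighter bounds are not reproduced here, so this sketch only conveys how a bound skips unnecessary distance computations.

  # Illustrative sketch: prune NN edit-distance computations with the classical
  # lower bound |len(a) - len(b)| <= levenshtein(a, b). The paper proposes
  # tighter, purpose-built bounds; this only shows the general idea.

  def levenshtein(a, b):
      prev = list(range(len(b) + 1))
      for i, ca in enumerate(a, 1):
          cur = [i]
          for j, cb in enumerate(b, 1):
              cur.append(min(prev[j] + 1, cur[j - 1] + 1,
                             prev[j - 1] + (ca != cb)))
          prev = cur
      return prev[-1]

  def nn_classify(query, corpus):
      """corpus: list of (string, label); returns label of nearest string."""
      best_d, best_label = float("inf"), None
      for s, label in corpus:
          if abs(len(s) - len(query)) >= best_d:
              continue                        # lower bound already too large
          d = levenshtein(query, s)
          if d < best_d:
              best_d, best_label = d, label
      return best_label
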
@article{Ortega+Gallego+Rico-Juan:ASOC:2021,
  abstract = {This work proposes a multimodal approach with which to predict the regional Gross Domestic Product (GDP) by combining historical GDP values with the information embodied in Twitter messages concerning the current economic condition. This proposal is of great interest, since it delivers forecasts at higher frequencies than both the official statistics (published only annually at the regional level in Spain) and the existing unofficial quarterly predictions (which rely on economic indicators that are available only after months of delay). The proposed method is based on a two-stage architecture. In the first stage, a multi-task autoencoder is initially used to obtain a GDP-related representation of tweets, which are then filtered to remove outliers and to obtain the GDP prediction from the consensus of opinions. In a second stage, this result is combined with the historical GDP values of the region using a multimodal network. The method is evaluated in four different regions of Spain using the tweets written by the most relevant economists, politicians, newspapers and institutions in each one. The results show that our approach successfully learns the evolution of the GDP using only historical information and tweets, thus making it possible to provide earlier forecasts about the regional GDP. This method also makes it possible to establish which opinions are the most or least influential regarding this prediction. As an additional exercise, we have assessed how well our method predicted the effect of the COVID-19 pandemic.},
  title = {A multimodal approach for regional GDP prediction using social media activity and historical information},
  author = {Ortega-Bastida, Javier and Gallego, Antonio Javier and Rico-Juan, Juan Ramón and Albarrán, Pedro},
  doi = {10.1016/j.asoc.2021.107693},
  url = {https://doi.org/10.1016/j.asoc.2021.107693},
  year = {2021},
  month = nov,
  publisher = {Elsevier},
  volume = {111},
  pages = {107693},
  journal = {Applied Soft Computing},
  impact_factor = {5.472 - Q1 - 20/137 - JCR}
}
@article{Rico-Juan+Taltavull:ESA:2021,
  abstract = {Two sets of modelling tools are used to evaluate the precision of housing-price forecasts: machine learning and hedonic regression. Evidence on the prediction capacity of a range of methods points to the superiority of the random forest, as it can calculate real-estate values with an error of less than 2\%. This method also ranks the attributes that are most relevant to determining housing prices. Hedonic regression models are less precise but more robust as they can identify the housing attributes that most affect the level of housing prices. This empirical exercise adds new knowledge to the literature as it investigates the capacity of the random forest to identify the three dimensions of non-linearity which, from an economic theoretical point of view, would identify the reactions of different market agents. The intention of the robustness test is to check for these non-linear relationships using hedonic regression. The quantile tools also highlight non-linearities, depending on the price levels. The results show that a combination of techniques would add information on the unobservable (non-linear) relationships between housing prices and housing attributes on the real-estate market.},
  title = {Machine learning with explainability or spatial hedonics tools? An analysis of the asking prices in the housing market in Alicante, Spain},
  author = {Rico-Juan, Juan Ramón and Taltavull de La Paz, Paloma},
  journal = {Expert Systems with Applications},
  volume = {171},
  pages = {114590},
  year = {2021},
  publisher = {Elsevier},
  doi = {10.1016/j.eswa.2021.114590},
  url = {https://doi.org/10.1016/j.eswa.2021.114590},
  impact_factor = {6.954 - Q1 - 23/140 - JCR (Open Access)}
}
@article{Rico-Juan+Cachero+Macia:AEHE:2021,
  abstract = {Maximising the accuracy and learning of self and peer assessment activities in higher education requires instructors to make several design decisions, including whether the assessment process should be individual or collaborative, and, if collaborative, determining the number of members of each peer assessment team. In order to support this decision, a quasi-experiment was carried out in which 82 first-year students used three peer assessment modalities. A total of 1574 assessments were obtained. The accuracy of both the students’ self-assessment and their peer assessment was measured. Results show that students’ self-assessment significantly improved when groups of three were used, provided that those with the 20\% poorest performances were excluded from the analysis. This suggests that collaborative peer assessment improves learning. Peer assessment scores were more accurate than self-assessment, regardless of the modality, and the accuracy improved with the number of assessments received. Instructors need to consider the trade-off between students’ improved understanding, which favours peer assessment using groups of three, and a higher number of assessments, which, under time constraints, favours individual peer assessment.},
  doi = {10.1080/02602938.2021.1955090},
  url = {https://doi.org/10.1080/02602938.2021.1955090},
  year = {2021},
  month = aug,
  publisher = {Informa {UK} Limited},
  pages = {1--18},
  author = {Rico-Juan, Juan Ramón and Cachero, Cristina and Macià, Hermenegilda},
  title = {Influence of individual versus collaborative peer assessment on score accuracy and learning outcomes in higher education: an empirical study},
  journal = {Assessment \& Evaluation in Higher Education},
  impact_factor = {4.984 - Q1 - 17/264 - JCR (Social Science)}
}
@article{Gallego+Rico-Juan+Valero-Mas:PR:2022,
  abstract = {The k-Nearest Neighbor (kNN) algorithm is widely used in the supervised learning field and, particularly, in search and classification tasks, owing to its simplicity, competitive performance, and good statistical properties. However, its inherent inefficiency prevents its use in most modern applications due to the vast amount of data that the current technological evolution generates, which makes the optimization of kNN-based search strategies of particular interest. This paper introduces the caKD+ algorithm, which tackles this limitation by combining the use of feature learning techniques, clustering methods, adaptive search parameters per cluster, and the use of pre-calculated K-Dimensional Tree structures, and results in a highly efficient search method. This proposal has been evaluated using 10 datasets and the results show that caKD+ significantly outperforms 16 state-of-the-art efficient search methods while still being as accurate as the exhaustive kNN search.},
  doi = {10.1016/j.patcog.2021.108356},
  url = {https://doi.org/10.1016/j.patcog.2021.108356},
  year = {2022},
  month = feb,
  author = {Gallego, Antonio Javier and Rico-Juan, Juan Ramón and Valero-Mas, José Javier},
  journal = {Pattern Recognition},
  volume = {122},
  pages = {108356},
  title = {Efficient k-nearest neighbor search based on clustering and adaptive k values},
  impact_factor = {7.740 - Q1 - 17/140 - JCR}
}
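
A simplified sketch of the cluster-plus-KD-tree strategy behind caKD+ follows: partition the training set with k-means, build one KD-tree per cluster, and answer each query from the tree of its nearest centroid. The feature learning stage and the per-cluster adaptive k of the actual algorithm are omitted, and the class and parameter names are illustrative assumptions.

  # Simplified cluster + per-cluster KD-tree search (caKD+ also learns features
  # and an adaptive k per cluster; both are omitted in this sketch).
  from collections import Counter
  from sklearn.cluster import KMeans
  from sklearn.neighbors import KDTree

  class ClusteredKNN:
      def __init__(self, X, y, n_clusters=8, k=5):    # X, y: numpy arrays
          self.k = k
          self.km = KMeans(n_clusters=n_clusters, n_init=10).fit(X)
          self.trees, self.labels = [], []
          for c in range(n_clusters):
              mask = self.km.labels_ == c
              self.trees.append(KDTree(X[mask]))      # one tree per cluster
              self.labels.append(y[mask])

      def predict_one(self, x):
          c = self.km.predict(x.reshape(1, -1))[0]    # route to nearest centroid
          k = min(self.k, len(self.labels[c]))
          _, idx = self.trees[c].query(x.reshape(1, -1), k=k)
          votes = Counter(self.labels[c][idx[0]])     # majority vote in cluster
          return votes.most_common(1)[0][0]
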
@article{Cabrero-García+Rico-Juan+Oliver-Roig:QLR:2021,
  abstract = {Purpose: The global activity limitation indicator (GALI) is the only internationally agreed and harmonised participation restriction measure. We examine if GALI, as intended, is a reflective measure of the domains of participation; furthermore, we determine the relative importance of these domains. Also, we investigated the consistency of response to GALI by age and gender and compared the performance of GALI with that of self-rated health (SRH). Methods: We used Spanish data from the European Health and Social Integration Survey and selected adults aged 18 and over (N=13,568). Data analyses, based on logistic regression models and Shapley value decomposition, were also stratified by age. The predictors of the models were demographic variables and restrictions in participation domains: studies, work, mobility, leisure and social activities, domestic life, and self-care. The GALI and SRH were the response variables. Results: GALI was strongly associated with all participation domains (e.g. for domestic life, adjusted OR 24.34 (95\% CI 18.53–31.97) in adults under 65) and performed differentially with age (e.g. for domestic life, adjusted OR 13.33 (95\% CI 10.42–17.03) in adults over 64), but not with gender. The relative importance of domains varied with age (e.g. work was the most important domain for younger adults and domestic life for older adults). The results with SRH were parallel to those of GALI, but the association of SRH with participation domains was weaker. Conclusions: GALI reflects restrictions in multiple participation domains well and performs differently with age, probably because older people lower their standard of good functioning.},
  doi = {10.1007/s11136-021-03057-z},
  url = {https://doi.org/10.1007/s11136-021-03057-z},
  year = {2021},
  month = dec,
  author = {Cabrero-García, Julio and Rico-Juan, Juan Ramón and Oliver-Roig, Antonio},
  journal = {Quality of Life Research},
  title = {Does the global activity limitation indicator measure participation restriction? Data from the European Health and Social Integration Survey in Spain},
  impact_factor = {4.147 (4.072) - Q1 - JCR}
}
@article{Oliver-Roig+Rico-Juan+Richart-Martinez+Cabrero-Garcia:CMPB:2022,
  abstract = {Background and Objective: Adequate support in maternity wards is decisive for breastfeeding outcomes during the first year of life. Quality improvement interventions require the identification of the factors influencing hospital benchmark indicators. Machine Learning (ML) models and post-hoc eXplainable Artificial Intelligence (XAI) techniques make it possible to produce accurate predictions and to explain them. This study aimed to predict exclusive breastfeeding during the in-hospital postpartum stay using ML algorithms and to explain the ML model's behaviour to support decision making. Methods: The dataset included 2042 mothers giving birth in 18 hospitals in Eastern Spain. We obtained information on demographics, mothers' breastfeeding experiences, clinical variables, and participating hospitals' support conditions. The outcome variable was exclusive breastfeeding during the in-hospital postpartum stay. We tested algorithms from different ML families. To evaluate the ML models, we applied 10-fold stratified cross-validation. We used the following metrics: area under the receiver operating characteristic curve (ROC AUC), area under the precision-recall curve (PR AUC), accuracy, and Brier score. After selecting the best-fitting model, we calculated Shapley's additive values to assign weights to each predictor depending on its additive contribution to the outcome and to explain the predictions. Results: The XGBoost algorithm showed the best metrics (ROC AUC=0.78, PR AUC=0.86, accuracy=0.75, Brier=0.17). The main predictors of the model included, in order of importance, pacifier use, the degree of breastfeeding self-efficacy, the previous breastfeeding experience, the birth weight, the admission of the baby to a neonatal care unit after birth, the moment of the first skin-to-skin contact between mother and baby, and the Baby-Friendly Hospital Initiative accreditation of the hospital. Specific examples of linear and nonlinear relations between the main predictors and the outcome, and of heterogeneity of effects, are presented. Also, we describe diverse individual cases showing the variation of the prediction depending on individual characteristics. Conclusions: The ML model adequately predicted exclusive breastfeeding during the in-hospital stay. Our results pointed to opportunities for improving care related to support for specific groups of mothers, defined by current and previous infant feeding experiences and clinical conditions of the newborns, and the participating hospitals' support conditions. Also, XAI techniques allowed us to identify non-linear relations and effect heterogeneity, explaining the risk variations of specific cases.},
  title = {Predicting exclusive breastfeeding in maternity wards using Machine Learning techniques},
  author = {Oliver-Roig, Antonio and Rico-Juan, Juan Ramón and Richart-Martínez, Miguel and Cabrero-García, Julio},
  journal = {Computer Methods and Programs in Biomedicine},
  doi = {10.1016/j.cmpb.2022.106837},
  pages = {106837},
  year = {2022},
  month = jun,
  publisher = {Elsevier},
  impact_factor = {5.428 - Q1 - 22/111 - JCR}
}
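
The evaluation pipeline described above (stratified 10-fold cross-validation of XGBoost followed by SHAP attributions) roughly corresponds to the following sketch; the study's hyperparameters and preprocessing are not reproduced, and the function name is illustrative.

  # Sketch of the reported pipeline: stratified 10-fold CV of XGBoost, then
  # SHAP values on the fitted model (study-specific tuning not reproduced).
  import shap
  import xgboost as xgb
  from sklearn.model_selection import StratifiedKFold, cross_val_score

  def evaluate_and_explain(X, y):
      model = xgb.XGBClassifier(eval_metric="logloss")
      cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
      auc = cross_val_score(model, X, y, cv=cv, scoring="roc_auc")
      print(f"ROC AUC: {auc.mean():.2f} +/- {auc.std():.2f}")

      model.fit(X, y)
      explainer = shap.TreeExplainer(model)    # additive Shapley attributions
      shap_values = explainer.shap_values(X)   # one weight per feature/sample
      return model, shap_values
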
@article{Cachero+Rico-Juan+Macià:ESA:2023,
  abstract = {The successful instructional design of self and peer assessment in higher education poses several challenges that instructors need to be aware of. One of these is the influence of students’ personalities on their intention to adopt peer assessment. This paper presents a quasi-experiment in which 85 participants, enrolled in the first year of a Computer Engineering programme, were assessed regarding their personality and their acceptance of three modalities of peer assessment (individual, pairs, in threes). Following a within-subjects design, the students applied the three modalities, in a different order, with three different activities. An analysis of the resulting 1195 observations using ML techniques shows how the Random Forest algorithm yields significantly better predictions for three out of the four adoption variables included in the study. Additionally, the application of a set of eXplainable Artificial Intelligence (XAI) techniques shows that Agreeableness is the best predictor of Usefulness and Ease of Use, while Extraversion is the best predictor of Compatibility, and Neuroticism has the greatest impact on global Intention to Use. The discussion highlights how, as happens with other innovations in educational processes, low levels of Conscientiousness are the most consistent predictor of resistance to the introduction of peer assessment processes in the classroom. Also, it stresses the value of peer assessment in augmenting the positive feelings of students scoring high on Neuroticism, which could lead to better performance. Finally, the low impact of the peer assessment modality on student perceptions compared to personality variables is debated.},
  author = {Cachero, Cristina and Rico-Juan, Juan Ramón and Macià, Hermenegilda},
  title = {Influence of personality and modality on peer assessment evaluation perceptions using Machine Learning techniques},
  journal = {Expert Systems with Applications},
  volume = {213},
  pages = {119150},
  year = {2023},
  month = mar,
  issn = {0957-4174},
  doi = {10.1016/j.eswa.2022.119150},
  impact_factor = {8.665 (8.093) - Q1 - 21/144 (Open Access)}
}
@article{Rico-Juan+Sánchez-Cartagena+Valero-Mas+Gallego:TLT:2023,
  abstract = {Online Judge (OJ) systems are typically considered within programming-related courses as they yield fast and objective assessments of the code developed by the students. Such an evaluation generally provides a single decision based on a rubric, most commonly whether the submission successfully accomplished the assignment. Nevertheless, since in an educational context such information may be deemed insufficient, it would be beneficial for both the student and the instructor to receive additional feedback about the overall development of the task. This work aims to tackle this limitation by considering the further exploitation of the information gathered by the OJ and automatically inferring feedback for both the student and the instructor. More precisely, we consider the use of learning-based schemes—particularly, Multi-Instance Learning and classical Machine Learning formulations—to model student behaviour. In addition, Explainable Artificial Intelligence is considered to provide human-understandable feedback. The proposal has been evaluated considering a case study comprising 2,500 submissions from roughly 90 different students from a programming-related course in a Computer Science degree. The results obtained validate the proposal: the model is capable of significantly predicting the user outcome (either passing or failing the assignment) solely based on the behavioural pattern inferred by the submissions provided to the OJ. Moreover, the proposal is able to identify prone-to-fail student groups and profiles as well as other relevant information, which eventually serves as feedback to both the student and the instructor.},
  author = {Rico-Juan, Juan Ramón and Sánchez-Cartagena, Víctor M. and Valero-Mas, Jose J. and Gallego, Antonio Javier},
  journal = {IEEE Transactions on Learning Technologies},
  title = {Identifying Student Profiles Within Online Judge Systems Using Explainable Artificial Intelligence},
  year = {2023},
  month = jan,
  pages = {1--14},
  doi = {10.1109/TLT.2023.3239110},
  publisher = {IEEE Education Society},
  impact_factor = {4.433 (4.414) - Q2 - 41/112}
}
@article{Llorca-Schenk+Rico-Juan+Sanchez-Lozano:ESA:2023,
  abstract = {This paper shows the development of a tool with which to solve the most critical aspect of the porthole die design problem using a predictive model based on machine learning (ML). The model relies on a large amount of geometrical data regarding successful porthole die designs, information on which was obtained thanks to a collaboration with a leading extrusion company. In all cases, the dies were made of H-13 hot work steel and the billet material was 6063 aluminium alloy. The predictive model was chosen from a series of tests with different algorithms belonging to various ML families, which were applied to the analysis of geometrical data corresponding to 596 ports from 88 first trial dies. Algorithms based on the generation of multiple decision trees together with the boosting technique obtained the most promising results, the best by far being the CatBoost algorithm. The explainability of this model is based on a post-hoc approach using the SHAP (SHapley Additive exPlanations) tool. The results obtained with this ML-based model are notably better than those of a previous model based on linear regression as regards both the R2 metric and the results obtained with the application examples. An additional practical advantage is its explainability, which is a great help when deciding the best way in which to adjust an initial design to the predictive model. This ML-based model is, therefore, an optimal means to integrate the experience and know-how accumulated through many designs over time in order to apply it to new designs. It will also provide an aid in generating the starting point for the design of high-difficulty dies, in order to minimise the number of FEM (finite element method) simulation/correction iterations required until an optimal solution is achieved. The aim is not to eliminate FEM simulation from the design tasks, but rather to help improve and accelerate the whole process of designing porthole dies. The work presented herein addresses a validation model for a very common porthole die typology: four cavity and four port per cavity dies for 6xxx series aluminium alloys. However, a wide range of research regarding the generalisation of this model or its extension to other porthole die typologies must still be carried out.},
  title = {Designing porthole aluminium extrusion dies on the basis of eXplainable Artificial Intelligence},
  author = {Llorca-Schenk, Juan and Rico-Juan, Juan Ram{\'o}n and Sanchez-Lozano, Miguel},
  journal = {Expert Systems with Applications},
  pages = {119808},
  year = {2023},
  publisher = {Elsevier},
  doi = {10.1016/j.eswa.2023.119808},
  impact_factor = {8.665 (8.093) - Q1 - 21/144}
}
@article{Navarro-Soria+Rico-Juan+Juarez-Ruiz+Lavigne-Cervan:ANC:2024,
  abstract = { Accurate assessment of Attention Deficit Hyperactivity Disorder (ADHD) is crucial for the effective treatment of affected individuals. Traditionally, psychometric tests such as the WISC-IV have been utilized to gather evidence and identify patterns or factors contributing to ADHD diagnosis. However, in recent years, the use of machine learning (ML) models in conjunction with post-hoc eXplainable Artificial Intelligence (XAI) techniques has improved our ability to make precise predictions and provide transparent explanations. The objective of this study is twofold: firstly, to predict the likelihood of an individual receiving an ADHD diagnosis using ML algorithms, and secondly, to offer interpretable insights into the decision-making process of the ML model. The dataset under scrutiny comprises 694 cases collected over the past decade in Spain, including information on age, gender, and WISC-IV test scores. The outcome variable is the professional diagnosis. Diverse ML algorithms representing various learning styles were rigorously evaluated through a stratified 10-fold cross-validation, with performance assessed using key metrics, including accuracy, area under the receiver operating characteristic curve, sensitivity, and specificity. Models were compared using both the full set of initial features and a well-suited wrapper-type feature selection algorithm (Boruta). Following the identification of the most suitable model, Shapley additive values were computed to assign weights to each predictor based on their additive contribution to the outcome and to elucidate the predictions. Strikingly, a reduced set of 8 out of the initial 20 variables produced results comparable to using the full feature set. Among the ML models tested, the Random Forest algorithm outperformed others on most metrics (ACC = 0.90, AUC = 0.94, Sensitivity = 0.91, Specificity = 0.92). Notably, the principal predictors, ranked by importance, included GAI – CPI, WMI, CPI, PSI, VCI, WMI – PSI, PRI, and LN. Individual case examples exhibit variations in predictions depending on unique characteristics, including instances of false positives and negatives. Our ML model adeptly predicted ADHD diagnoses in 90\% of cases, with potential for further enhancement by expanding our database. Furthermore, the use of XAI techniques enables the elucidation of salient factors in individual cases, thereby aiding inexperienced professionals in the diagnostic process and facilitating comparison with expert assessments. It is important to note that this tool is designed to support the ADHD diagnostic process, where the medical professional always has the final say in decision-making. },
  author = {Navarro-Soria, Ignasi and Rico-Juan, Juan Ramón and Juárez-Ruiz de Mier, Rocío and Lavigne-Cervan, Rocío},
  title = {Prediction of attention deficit hyperactivity disorder based on explainable artificial intelligence},
  journal = {Applied Neuropsychology: Child},
  volume = {0},
  number = {0},
  pages = {1--14},
  year = {2024},
  publisher = {Routledge},
  doi = {10.1080/21622965.2024.2336019},
  url = {https://doi.org/10.1080/21622965.2024.2336019},
  impact_factor = {1.7 (1.7) - Q3 - 58/81}
}

This file was generated by bibtex2html 1.99.