@Proceedings{COPA2018,
title = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
volume = {91},
year = {2018}
}
@InProceedings{gammerman18a,
title = {Preface},
author = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {1--2},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/gammerman18a/gammerman18a.pdf},
url = {http://proceedings.mlr.press/v91/gammerman18a.html},
abstract = {}
}
@InProceedings{johansson18a,
title = {Venn predictors for well-calibrated probability estimation trees},
author = {Ulf Johansson and Tuwe Löfström and Håkan Sundell and Henrik Linusson and Anders Gidenstam and Henrik Boström},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {3--14},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/johansson18a/johansson18a.pdf},
url = {http://proceedings.mlr.press/v91/johansson18a.html},
abstract = {Successful use of probabilistic classification requires well-calibrated probability estimates, i.e., the predicted class probabilities must correspond to the true probabilities. The standard solution is to employ an additional step, transforming the outputs from a classifier into probability estimates. In this paper, Venn predictors are compared to Platt scaling and isotonic regression, for the purpose of producing well-calibrated probabilistic predictions from decision trees. The empirical investigation, using 22 publicly available data sets, showed that the probability estimates from the Venn predictor were extremely well-calibrated. In fact, in a direct comparison using the accepted reliability metric, the Venn predictor estimates were the most exact on every data set.}
}
@InProceedings{nouretdinov18a,
title = {Inductive {V}enn-{A}bers predictive distribution},
author = {Ilia Nouretdinov and Denis Volkhonskiy and Pitt Lim and Paolo Toccaceli and Alexander Gammerman},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {15--36},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/nouretdinov18a/nouretdinov18a.pdf},
url = {http://proceedings.mlr.press/v91/nouretdinov18a.html},
abstract = {Venn predictors are a distribution-free probabilistic prediction framework that transforms the output of a scoring classifier into a (multi-)probabilistic prediction that has calibration guarantees, with the only requirement of an i.i.d. assumption for calibration and test data. In this paper, we extend the framework from classification (where probabilities are predicted for a discrete number of labels) to regression (where labels form a continuum). We show how Venn Predictors can be applied on top of any regression method to obtain calibrated predictive distributions, without requiring assumptions beyond i.i.d. of calibration and test sets. This is contrasted with methods such as Bayesian Linear Regression, for which the calibration guarantee instead relies on specific probabilistic assumptions on the distribution of the data. The adaptation of Venn Machine to regression required a theoretical analysis of the transductive and inductive forms of the predictor. We identify potential consistency problems and provide solutions for them. Finally, to illustrate their advantages, we apply regression Venn Predictors to the medical problem of predicting the survival time after Percutaneous Coronary Intervention, a potentially risky procedure that improves blood flow to a patient’s heart. The predictive distributions obtained with this method allow a variety of interpretations that include probability of survival time exceeding a chosen threshold or the shortest survival time guaranteed with a given probability.}
}
@InProceedings{vovk18a,
title = {Cross-conformal predictive distributions},
author = {Vladimir Vovk and Ilia Nouretdinov and Valery Manokhin and Alexander Gammerman},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {37--51},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/vovk18a/vovk18a.pdf},
url = {http://proceedings.mlr.press/v91/vovk18a.html},
abstract = {Conformal predictive systems are a recent modification of conformal predictors that output, in regression problems, probability distributions for labels of test observations rather than set predictions. The extra information provided by conformal predictive systems may be useful, e.g., in decision making problems. Conformal predictive systems inherit the relative computational inefficiency of conformal predictors. In this paper we discuss two computationally efficient versions of conformal predictive systems, which we call split conformal predictive systems and cross-conformal predictive systems, and discuss their advantages and limitations.}
}
@InProceedings{vovk18b,
title = {Conformal predictive decision making},
author = {Vladimir Vovk and Claus Bendtsen},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {52--62},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/vovk18b/vovk18b.pdf},
url = {http://proceedings.mlr.press/v91/vovk18b.html},
abstract = {This note explains how conformal predictive distributions can be used for the purpose of decision-making. Namely, a major limitation of conformal predictive distributions is that, at this time, they are only applicable to regression problems, where the label is a real number; however, this does not prevent them from being used in a general problem of decision making. The resulting methodology of conformal predictive decision making is illustrated on a small benchmark data set. Our main theoretical observation is that there exists an asymptotically efficient predictive decision-making system which can be obtained by using our methodology (and therefore, satisfying the standard property of validity).}
}
@InProceedings{korotin18a,
title = {Aggregating strategies for long-term forecasting},
author = {Alexander Korotin and Vladimir V'yugin and Evgeny Burnaev},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {63--82},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/korotin18a/korotin18a.pdf},
url = {http://proceedings.mlr.press/v91/korotin18a.html},
abstract = {The article is devoted to investigating an application of aggregating algorithms to the problem of the long-term forecasting. We examine the classic aggregating algorithms based on the exponential reweighing. For the general Vovk's aggregating algorithm we provide its probabilistic interpretation and its generalization for the long-term forecasting. For the special basic case of Vovk's algorithm we provide two its modifications for the long-term forecasting. The first one is theoretically close to an optimal algorithm and is based on replication of independent copies. It provides the time-independent regret bound with respect to the best expert in the pool. The second one is not optimal but is more practical (explicitly models dependencies in observations) and has $O(\sqrt{T})$ regret bound, where $T$ is the length of the game.}
}
@InProceedings{zaytsev18a,
title = {Interpolation error of {G}aussian process regression for misspecified case},
author = {Alexey Zaytsev and Evgenya Romanenkova and Dmitry Ermilov},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {83--95},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/zaytsev18a/zaytsev18a.pdf},
url = {http://proceedings.mlr.press/v91/zaytsev18a.html},
abstract = {An interpolation error is an integral of the squared error of a regression model over a domain of interest. We consider the interpolation error for the case of misspecified Gaussian process regression: a used covariance function differs from a true one. We derive the interpolation error for a grid design of experiments for an arbitrary covariance function. Then we consider particular types of covariance functions from theoretical and practical points of view. For $\textit{Matern}_{1/2}$ covariance function poor estimation of parameters only slightly affects the quality of interpolation. For the most common covariance functions including $\textit{Matern}_{3/2}$ and squared exponential covariance functions poor choose of parameters of covariance functions leads to a bad quality of interpolation.}
}
@InProceedings{zhou18a,
title = {Conformal feature-selection wrappers for instance transfer},
author = {Shuang Zhou and Evgueni Smirnov and Gijs Schoenmakers and Ralf Peeters and Tao Jiang},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {96--113},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/zhou18a/zhou18a.pdf},
url = {http://proceedings.mlr.press/v91/zhou18a.html},
abstract = {In this paper we propose a new method of conformal feature-selection wrappers for instance transfer (CFSWIT). Given target and source data, the method optimally selects features and source data that are relevant for a classification model. The CFSWIT method is model-independent. It was tested experimentally for several types of classifiers. The experiments show that the CFSWIT method is capable of outperforming standard instance transfer methods.}
}
@InProceedings{nguyen18a,
title = {Cover your cough: detection of respiratory events with confidence using a smartwatch},
author = {Khuong An Nguyen and Zhiyuan Luo},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {114--131},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/nguyen18a/nguyen18a.pdf},
url = {http://proceedings.mlr.press/v91/nguyen18a.html},
abstract = {Cough and sneeze are the most common means to spread respiratory diseases amongst humans. Existing approaches to detect coughing and sneezing events are either intrusive or do not provide any reliability measure. This paper offers a novel proposal to reliably and non-intrusively detect such events using a smartwatch as the underlying hardware, Conformal Prediction as the underlying software. We rigorously analysed the performances of our proposal with the Harvard ESC Environmental Sound dataset, and real coughing samples taken from a smartwatch in different ambient noises.}
}
@InProceedings{ahlberg18a,
title = {Using {V}enn-{A}bers predictors to assess cardio-vascular risk},
author = {Ernst Ahlberg and Ruben Buendia and Lars Carlsson},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {132--146},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/ahlberg18a/ahlberg18a.pdf},
url = {http://proceedings.mlr.press/v91/ahlberg18a.html},
abstract = {This study investigates a method for predicting compound risk based on in vitro assay data and estimated $C_{\textit{max}}$, the maximum concentration of a drug in the body. The method makes use of Venn-Abers predictors and Support Vector Machines to compute compound risk with respect to a biological target. The method has been applied to in vitro ion-channel data generated to assess cardiac risk and introduces a more intuitive way to reflect cardiac risk.}
}
@InProceedings{gauraha18a,
title = {Conformal prediction in learning under privileged information paradigm with applications in drug discovery},
author = {Niharika Gauraha and Lars Carlsson and Ola Spjuth},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {147--156},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/gauraha18a/gauraha18a.pdf},
url = {http://proceedings.mlr.press/v91/gauraha18a.html},
abstract = {This paper explores conformal prediction in the learning under privileged information (LUPI) paradigm. We use the SVM$+$ realization of LUPI in an inductive conformal predictor, and apply it to the MNIST benchmark dataset and three datasets in drug discovery. The results show that using privileged information produces valid models and improves efficiency compared to standard SVM, however the improvement varies between the tested datasets and is not substantial in the drug discovery applications. More importantly, using SVM$+$ in a conformal prediction framework enables valid prediction intervals at specified significance levels.}
}
@InProceedings{cherubin18a,
title = {Exchangeability martingales for selecting features in anomaly detection},
author = {Giovanni Cherubin and Adrian Baldwin and Jonathan Griffin},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {157--170},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/cherubin18a/cherubin18a.pdf},
url = {http://proceedings.mlr.press/v91/cherubin18a.html},
abstract = {We consider the problem of feature selection for unsupervised anomaly detection (AD) in time-series, where only normal examples are available for training. We develop a method based on exchangeability martingales that only keeps features that exhibit the same pattern (i.e., are i.i.d.) under normal conditions of the observed phenomenon. We apply this to the problem of monitoring a Windows service and detecting anomalies it exhibits if compromised; results show that our method: i) strongly improves the AD system’s performance, and ii) it reduces its computational complexity. Furthermore, it gives results that are easy to interpret for analysts, and it potentially increases robustness against AD evasion attacks.}
}
@InProceedings{eliades18a,
title = {Detecting seizures in {EEG} recordings using conformal prediction},
author = {Charalambos Eliades and Harris Papadopoulos},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {171--186},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/eliades18a/eliades18a.pdf},
url = {http://proceedings.mlr.press/v91/eliades18a.html},
abstract = {This study examines the use of the Conformal Prediction (CP) framework for the provision of confidence information in the detection of seizures in electroencephalograph (EEG) recordings. The detection of seizures is an important task since EEG recordings of seizures are of primary interest in the evaluation of epileptic patients. However, manual review of long-term EEG recordings for detecting and analyzing seizures that may have occurred is a time-consuming process. Therefore a technique for automatic detection of seizures in such recordings is highly beneficial since it can be used to significantly reduce the amount of data in need of manual review. Additionally, due to the infrequent and unpredictable occurrence of seizures, having high sensitivity is crucial for seizure detection systems. This is the main motivation for this study, since CP can be used for controlling the error rate of predictions and therefore guaranteeing an upper bound on the frequency of false negatives.}
}
@InProceedings{raab18a,
title = {Transfer learning for the probabilistic classification vector machine},
author = {Christoph Raab and Frank-Michael Schleif},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {187--200},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/raab18a/raab18a.pdf},
url = {http://proceedings.mlr.press/v91/raab18a.html},
abstract = {Transfer learning is focused on the reuse of supervised learning models in a new context. Prominent applications can be found in robotics, image processing or web mining. In these fields, the learning scenarios are naturally changing but often remain related to each other motivating the reuse of existing supervised models. Current transfer learning methods are not well suited and used for sparse and interpretable models. Sparsity is very desirable if the methods have to be used in technically limited environments and interpretability is getting more critical due to privacy regulations. In this work, we show how transfer learning can be integrated into the sparse and interpretable probabilistic classification vector machine and it is compared with different standard benchmarks in the field.}
}
@InProceedings{buendia18a,
title = {Venn-{A}bers predictors for improved compound iterative screening in drug discovery},
author = {Ruben Buendia and Ola Engkvist and Lars Carlsson and Thierry Kogej and Ernst Ahlberg},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {201--219},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/buendia18a/buendia18a.pdf},
url = {http://proceedings.mlr.press/v91/buendia18a.html},
abstract = {Iterative screening, where selected hits from a given round of screening are used to enrich a compound activity prediction model for the next iteration, enables more efficient screening campaigns. The portion of the compound library that should be screened in each iteration is often arbitrarily decided. This is because no accurate information between screening size and the number of hits to be retrieved exists. In this article, a novel method based on Venn-Abers predictors was used to determine the optimal number of compounds to be screened in order to get a desired number of hits. We found that Venn-Abers predictors provide accurate information to support a reliable and flexible decision about the portion size of the compound library that should be screened in each iteration. In addition, the method exhibited great ability in producing an enriched subset in terms of hits and their diversity.}
}
@InProceedings{neeven18a,
title = {Conformal stacked weather forecasting},
author = {Jelmer Neeven and Evgueni Smirnov},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {220--233},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/neeven18a/neeven18a.pdf},
url = {http://proceedings.mlr.press/v91/neeven18a.html},
abstract = {In this paper we propose to apply the stacking method to aggregating multi-output predictions from different weather-forecasting domains (websites). Depending on the aggregating procedure (non-conformal/conformal), the results can be bare multi-output predictions or multi-output prediction regions. The experiments show the applicability of the stacking method on real data related to eight weather-forecasting domains.}
}
@InProceedings{kuleshov18a,
title = {Conformal prediction in manifold learning},
author = {Alexander Kuleshov and Alexander Bernstein and Evgeny Burnaev},
booktitle = {Proceedings of the Seventh Workshop on Conformal and Probabilistic Prediction and Applications},
pages = {234--253},
year = {2018},
editor = {Alex Gammerman and Vladimir Vovk and Zhiyuan Luo and Evgueni Smirnov and Ralf Peeters},
volume = {91},
series = {Proceedings of Machine Learning Research},
address = {},
month = {11--13 Jun},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v91/kuleshov18a/kuleshov18a.pdf},
url = {http://proceedings.mlr.press/v91/kuleshov18a.html},
abstract = {The paper presents a geometrically motivated view on conformal prediction applied to nonlinear multi-output regression tasks for obtaining valid measure of accuracy of Manifold Learning Regression algorithms. A considered regression task is to estimate an unknown smooth mapping $\mathbf{f}$ from $q$-dimensional inputs $\mathbf{x} \in \mathbf{X}$ to $m$-dimensional outputs $\mathbf{y} = \mathbf{f}(\mathbf{x})$ based on training dataset $\mathbf{Z}_{(n)}$ consisting of ``input-output'' pairs $\{Z_i = (\mathbf{x}_i, \mathbf{y}_i = \mathbf{f}(\mathbf{x}_i))^{\mathrm{T}}, i = 1, 2, \ldots, n\}$. Manifold Learning Regression (MLR) algorithm solves this task using Manifold learning technique. At first, unknown $q$-dimensional Regression manifold $\mathbf{M}(\mathbf{f}) = \{(\mathbf{x}, \mathbf{f}(\mathbf{x}))^{\mathrm{T}} \in \mathrm{R}^{q+m}: \mathbf{x} \in \mathbf{X} \subset \mathrm{R}^{q}\}$, embedded in ambient $(q+m)$-dimensional space, is estimated from the training data $\mathbf{Z}_{(n)}$, sampled from this manifold. The constructed estimator $\mathbf{M}_{\text{MLR}}$, which is also $q$-dimensional manifold embedded in ambient space $\mathrm{R}^{q+m}$, is close to $\mathbf{M}$ in terms of Hausdorff distance. After that, an estimator $\mathbf{f}_{\text{MLR}}$ of the unknown function $\mathbf{f}$, mapping arbitrary input $\mathbf{x} \in \mathbf{X}$ to output $\mathbf{f}_{\mathrm{MLR}}(\mathbf{x})$, is constructed as the solution to the equation $\mathbf{M}(\mathbf{f}_{\mathrm{MLR}}) = \mathbf{M}_{\text{MLR}}$. Conformal prediction allows constructing a prediction region for an unknown output $\mathbf{y} = \mathbf{f}(\mathbf{x})$ at Out-of-Sample input point $\mathbf{x}$ for a given confidence level using given nonconformity measure, characterizing to which extent an example $Z = (\mathbf{x}, \mathbf{y})^{\mathrm{T}}$ is different from examples in the known dataset $\mathbf{Z}_{(n)}$. The paper proposes a new nonconformity measure based on MLR estimator using an analog of Bregman distance.}
}