@article{CiliaLandwehrPasserini2011,
  author    = {Cilia, Elisa and Landwehr, Niels and Passerini, Andrea},
  title     = {Relational feature mining with hierarchical multitask kFOIL},
  journal   = {Fundamenta Informaticae},
  volume    = {113},
  number    = {2},
  publisher = {IOS Press},
  address   = {Amsterdam},
  issn      = {0169-2968},
  doi       = {10.3233/FI-2011-604},
  pages     = {151--177},
  year      = {2011},
  abstract  = {We introduce hierarchical kFOIL as a simple extension of the multitask kFOIL learning algorithm. The algorithm first learns a core logic representation common to all tasks, and then refines it by specialization on a per-task basis. The approach can be easily generalized to a deeper hierarchy of tasks. A task clustering algorithm is also proposed in order to automatically generate the task hierarchy. The approach is validated on problems of drug-resistance mutation prediction and protein structural classification. Experimental results show the advantage of the hierarchical version over both single-task and multitask alternatives and its potential usefulness in providing explanatory features for the domain. Task clustering allows performance to be improved further when a deeper hierarchy is considered.},
  language  = {en}
}

@article{ThonLandwehrDeRaedt2011,
  author    = {Thon, Ingo and Landwehr, Niels and De Raedt, Luc},
  title     = {Stochastic relational processes: efficient inference and applications},
  journal   = {Machine Learning},
  volume    = {82},
  number    = {2},
  publisher = {Springer},
  address   = {Dordrecht},
  issn      = {0885-6125},
  doi       = {10.1007/s10994-010-5213-8},
  pages     = {239--272},
  year      = {2011},
  abstract  = {One of the goals of artificial intelligence is to develop agents that learn and act in complex environments. Realistic environments typically feature a variable number of objects, relations amongst them, and non-deterministic transition behavior. While standard probabilistic sequence models provide efficient inference and learning techniques for sequential data, they typically cannot fully capture the relational complexity. On the other hand, statistical relational learning techniques are often too inefficient to cope with complex sequential data. In this paper, we introduce a simple model that occupies an intermediate position in this expressiveness/efficiency trade-off. It is based on CP-logic (Causal Probabilistic Logic), an expressive probabilistic logic for modeling causality. However, by specializing CP-logic to represent a probability distribution over sequences of relational state descriptions and employing a Markov assumption, inference and learning become more tractable and effective. Specifically, we show how to solve part of the inference and learning problems directly at the first-order level, while transforming the remaining part into the problem of computing all satisfying assignments for a Boolean formula in a binary decision diagram. We experimentally validate that the resulting technique is able to handle probabilistic relational domains with a substantial number of objects and relations.},
  language  = {en}
}

@article{SawadeBickelvonOertzenetal.2013,
  author    = {Sawade, Christoph and Bickel, Steffen and von Oertzen, Timo and Scheffer, Tobias and Landwehr, Niels},
  title     = {Active evaluation of ranking functions based on graded relevance},
  journal   = {Machine Learning},
  volume    = {92},
  number    = {1},
  publisher = {Springer},
  address   = {Dordrecht},
  issn      = {0885-6125},
  doi       = {10.1007/s10994-013-5372-5},
  pages     = {41--64},
  year      = {2013},
  abstract  = {Evaluating the quality of ranking functions is a core task in web search and other information retrieval domains. Because query distributions and item relevance change over time, ranking models often cannot be evaluated accurately on held-out training data. Instead, considerable effort is spent on manually labeling the relevance of query results for test queries in order to track ranking performance. We address the problem of estimating ranking performance as accurately as possible on a fixed labeling budget. Estimates are based on a set of most informative test queries selected by an active sampling distribution. Query labeling costs depend on the number of result items as well as item-specific attributes such as document length. We derive cost-optimal sampling distributions for the commonly used performance measures Discounted Cumulative Gain and Expected Reciprocal Rank. Experiments on web search engine data illustrate significant reductions in labeling costs.},
  language  = {en}
}