@article{ThienenNoweskiRauthetal.2012, author = {Thienen, Julia von and Noweski, Christine and Rauth, Ingo and Meinel, Christoph and Lange, Sabine}, title = {If you want to know who you are, tell me where you are : the importance of places}, year = {2012}, language = {en} } @article{Thomas2010, author = {Thomas, Ivonne}, title = {Reliable digital identities for SOA and the Web}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @article{ThonLandwehrDeRaedt2011, author = {Thon, Ingo and Landwehr, Niels and De Raedt, Luc}, title = {Stochastic relational processes : efficient inference and applications}, series = {Machine learning}, volume = {82}, journal = {Machine learning}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-010-5213-8}, pages = {239 -- 272}, year = {2011}, abstract = {One of the goals of artificial intelligence is to develop agents that learn and act in complex environments. Realistic environments typically feature a variable number of objects, relations amongst them, and non-deterministic transition behavior. While standard probabilistic sequence models provide efficient inference and learning techniques for sequential data, they typically cannot fully capture the relational complexity. On the other hand, statistical relational learning techniques are often too inefficient to cope with complex sequential data. In this paper, we introduce a simple model that occupies an intermediate position in this expressiveness/efficiency trade-off. It is based on CP-logic (Causal Probabilistic Logic), an expressive probabilistic logic for modeling causality. However, by specializing CP-logic to represent a probability distribution over sequences of relational state descriptions and employing a Markov assumption, inference and learning become more tractable and effective. Specifically, we show how to solve part of the inference and learning problems directly at the first-order level, while transforming the remaining part into the problem of computing all satisfying assignments for a Boolean formula in a binary decision diagram. We experimentally validate that the resulting technique is able to handle probabilistic relational domains with a substantial number of objects and relations.}, language = {en} } @article{TiwariPrakashGrossetal.2020, author = {Tiwari, Abhishek and Prakash, Jyoti and Groß, Sascha and Hammer, Christian}, title = {A large scale analysis of Android - Web hybridization}, series = {The journal of systems and software}, volume = {170}, journal = {The journal of systems and software}, publisher = {Elsevier}, address = {New York}, issn = {0164-1212}, doi = {10.1016/j.jss.2020.110775}, pages = {17}, year = {2020}, abstract = {Many Android applications embed webpages via WebView components and execute JavaScript code within Android. Hybrid applications leverage dedicated APIs to load a resource and render it in a WebView. Furthermore, Android objects can be shared with the JavaScript world. However, bridging the interfaces of the Android and JavaScript world might also incur severe security threats: Potentially untrusted webpages and their JavaScript might interfere with the Android environment and its access to native features. No general analysis is currently available to assess the implications of such hybrid apps bridging the two worlds. To understand the semantics and effects of hybrid apps, we perform a large-scale study on the usage of the hybridization APIs in the wild. 
We analyze and categorize the parameters to hybridization APIs for 7,500 randomly selected and the 196 most popular applications from the Google Playstore as well as 1,000 malware samples. Our results advance the general understanding of hybrid applications, as well as implications for potential program analyses, and the current security situation: We discovered thousands of flows of sensitive data from Android to JavaScript, the vast majority of which could flow to potentially untrustworthy code. Our analysis identified numerous web pages embedding vulnerabilities, which we exemplarily exploited. Additionally, we discovered a multitude of applications in which potentially untrusted JavaScript code may interfere with (trusted) Android objects, both in benign and malign applications.}, language = {en} } @article{TranPontelliBalduccinietal.2022, author = {Tran, Son Cao and Pontelli, Enrico and Balduccini, Marcello and Schaub, Torsten}, title = {Answer set planning}, series = {Theory and practice of logic programming}, journal = {Theory and practice of logic programming}, publisher = {Cambridge University Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068422000072}, pages = {73}, year = {2022}, abstract = {Answer Set Planning refers to the use of Answer Set Programming (ASP) to compute plans, that is, solutions to planning problems, that transform a given state of the world to another state. The development of efficient and scalable answer set solvers has provided a significant boost to the development of ASP-based planning systems. This paper surveys the progress made during the last two and a half decades in the area of answer set planning, from its foundations to its use in challenging planning domains. The survey explores the advantages and disadvantages of answer set planning. It also discusses typical applications of answer set planning and presents a set of challenges for future research.}, language = {en} } @article{TroegerMerzky2014, author = {Troeger, Peter and Merzky, Andre}, title = {Towards standardized job submission and control in infrastructure clouds}, series = {Journal of grid computing}, volume = {12}, journal = {Journal of grid computing}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-7873}, doi = {10.1007/s10723-013-9275-2}, pages = {111 -- 125}, year = {2014}, abstract = {The submission and management of computational jobs is a traditional part of utility computing environments. End users and developers of domain-specific software abstractions often have to deal with the heterogeneity of such batch processing systems. This led to a number of application programming interface and job description standards in the past, which are implemented and established for cluster and Grid systems. With the recent rise of cloud computing as a new utility computing paradigm, the standardized access to batch processing facilities operated on cloud resources becomes an important issue. Furthermore, the design of such a standard has to consider a tradeoff between feature completeness and the achievable level of interoperability. The article discusses this general challenge, and presents some existing standards with traditional cluster and Grid computing background that may be applicable to cloud environments. 
We present OCCI-DRMAA as one approach for standardized access to batch processing facilities hosted in a cloud.}, language = {en} } @article{Uflacker2010, author = {Uflacker, Matthias}, title = {Computational analysis of virtual team collaboration in the early stages of engineering design}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @article{UflackerKowarkZeier2011, author = {Uflacker, Matthias and Kowark, Thomas and Zeier, Alexander}, title = {An instrument for real-time design interaction capture}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @article{vanHoolandVerborghDeWildeetal.2013, author = {van Hooland, Seth and Verborgh, Ruben and De Wilde, Max and Hercher, Johannes and Mannens, Erik and Van de Walle, Rik}, title = {Evaluating the success of vocabulary reconciliation for cultural heritage collections}, series = {Journal of the American Society for Information Science and Technology}, volume = {64}, journal = {Journal of the American Society for Information Science and Technology}, number = {3}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {1532-2882}, doi = {10.1002/asi.22763}, pages = {464 -- 479}, year = {2013}, abstract = {The concept of Linked Data has made its entrance in the cultural heritage sector due to its potential use for the integration of heterogeneous collections and deriving additional value out of existing metadata. However, practitioners and researchers alike need a better understanding of what outcome they can reasonably expect of the reconciliation process between their local metadata and established controlled vocabularies which are already a part of the Linked Data cloud. This paper offers an in-depth analysis of how a locally developed vocabulary can be successfully reconciled with the Library of Congress Subject Headings (LCSH) and the Arts and Architecture Thesaurus (AAT) through the help of a general-purpose tool for interactive data transformation (OpenRefine). Issues negatively affecting the reconciliation process are identified and solutions are proposed in order to derive maximum value from existing metadata and controlled vocabularies in an automated manner.}, language = {en} } @article{VidelaGuziolowskiEduatietal.2015, author = {Videla, Santiago and Guziolowski, Carito and Eduati, Federica and Thiele, Sven and Gebser, Martin and Nicolas, Jacques and Saez-Rodriguez, Julio and Schaub, Torsten H. and Siegel, Anne}, title = {Learning Boolean logic models of signaling networks with ASP}, series = {Theoretical computer science}, volume = {599}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2014.06.022}, pages = {79 -- 101}, year = {2015}, abstract = {Boolean networks provide a simple yet powerful qualitative modeling approach in systems biology. However, manual identification of logic rules underlying the system being studied is in most cases out of reach. Therefore, automated inference of Boolean logical networks from experimental data is a fundamental question in this field. This paper addresses the problem of learning, from a prior knowledge network describing causal interactions and phosphorylation activities at a pseudo-steady state, Boolean logic models of immediate-early response in signaling transduction networks. The underlying optimization problem has so far been addressed through mathematical programming approaches and the use of dedicated genetic algorithms. 
In a recent work we have shown severe limitations of stochastic approaches in this domain and proposed to use Answer Set Programming (ASP), considering a simpler problem setting. Herein, we extend our previous work in order to consider more realistic biological conditions including numerical datasets, the presence of feedback loops in the prior knowledge network and the necessity of multi-objective optimization. In order to cope with such extensions, we propose several discretization schemes and elaborate upon our previous ASP encoding. Towards real-world biological data, we evaluate the performance of our approach over in silico numerical datasets based on a real and large-scale prior knowledge network. The correctness of our encoding and discretization schemes is dealt with in Appendices A-B.}, language = {en} } @article{Vierheller2014, author = {Vierheller, Janine}, title = {Exploratory Data Analysis}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lamprecht, Anna-Lena and Margaria, Tiziana}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {110 -- 126}, year = {2014}, abstract = {In bioinformatics the term exploratory data analysis refers to different methods to get an overview of large biological data sets. Hence, it helps to create a framework for further analysis and hypothesis testing. The workflow facilitates this first important step of the analysis of data created by high-throughput technologies. The results are different plots showing the structure of the measurements. The goal of the workflow is the automation of the exploratory data analysis, while flexibility should also be guaranteed. The basic tool is the free software R.}, language = {en} } @article{WaitelonisJuergesSack2019, author = {Waitelonis, J{\"o}rg and J{\"u}rges, Henrik and Sack, Harald}, title = {Remixing entity linking evaluation datasets for focused benchmarking}, series = {Semantic Web}, volume = {10}, journal = {Semantic Web}, number = {2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1570-0844}, doi = {10.3233/SW-180334}, pages = {385 -- 412}, year = {2019}, abstract = {In recent years, named entity linking (NEL) tools were primarily developed in terms of a general approach, whereas today numerous tools are focusing on specific domains such as the mapping of persons and organizations only, or the annotation of locations or events in microposts. However, the available benchmark datasets necessary for the evaluation of NEL tools do not reflect this focalizing trend. We have analyzed the evaluation process applied in the NEL benchmarking framework GERBIL [in: Proceedings of the 24th International Conference on World Wide Web (WWW'15), International World Wide Web Conferences Steering Committee, Republic and Canton of Geneva, Switzerland, 2015, pp. 1133-1143, Semantic Web 9(5) (2018), 605-625] and all its benchmark datasets. Based on these insights we have extended the GERBIL framework to enable a more fine-grained evaluation and in-depth analysis of the available benchmark datasets with respect to different emphases. 
This paper presents the implementation of an adaptive filter for arbitrary entities and customized benchmark creation as well as the automated determination of typical NEL benchmark dataset properties, such as the extent of content-related ambiguity and diversity. These properties are integrated on different levels, which also enables tailoring customized new datasets out of the existing ones by remixing documents based on desired emphases. Besides a new system library to enrich provided NIF [in: International Semantic Web Conference (ISWC'13), Lecture Notes in Computer Science, Vol. 8219, Springer, Berlin, Heidelberg, 2013, pp. 98-113] datasets with statistical information, best practices for dataset remixing are presented, and an in-depth analysis of the performance of entity linking systems on special-focus datasets is provided.}, language = {en} } @article{WaltonGordon2015, author = {Walton, Douglas and Gordon, Thomas F.}, title = {Formalizing informal logic}, series = {Informal logic : reasoning and argumentation in theory and practice}, volume = {35}, journal = {Informal logic : reasoning and argumentation in theory and practice}, number = {4}, publisher = {Centre for Research in Reasoning, Argumentation and Rhetoric, University of Windsor}, address = {Windsor}, issn = {0824-2577}, pages = {508 -- 538}, year = {2015}, abstract = {In this paper we investigate the extent to which formal argumentation models can handle ten basic characteristics of informal logic identified in the informal logic literature. By showing how almost all of these characteristics can be successfully modelled formally, we claim that good progress can be made toward the project of formalizing informal logic. Of the formal argumentation models available, we chose the Carneades Argumentation System (CAS), a formal, computational model of argument that uses argument graphs as its basis, structures of a kind very familiar to practitioners of informal logic through their use of argument diagrams.}, language = {en} } @article{Wang2001a, author = {Wang, Kewen}, title = {A comparative study of disjunctive well-founded semantics}, isbn = {3-540-42593-4}, year = {2001}, language = {en} } @article{Wang2001b, author = {Wang, Kewen}, title = {Disjunctive well-founded semantics revisited}, year = {2001}, language = {en} } @article{Wang2001c, author = {Wang, Kewen}, title = {A top-down procedure for disjunctive well-founded semantics}, isbn = {3-540-42254-4}, year = {2001}, language = {en} } @article{Wang2000, author = {Wang, Kewen}, title = {Argumentation-based abduction in disjunctive logic programming}, year = {2000}, language = {en} } @article{WangZhou2001, author = {Wang, Kewen and Zhou, Lizhu}, title = {An extension to GCWA and query evaluation for disjunctive deductive databases}, year = {2001}, language = {en} } @article{WangZhouLin2000, author = {Wang, Kewen and Zhou, Lizhu and Lin, Fangzhen}, title = {Alternating fixpoint theory for logic programs with priority}, isbn = {3-540-67797-6}, year = {2000}, language = {en} }