@article{ZienRaetschMikaetal.2000, author = {Zien, Alexander and R{\"a}tsch, Gunnar and Mika, Sebastian and Sch{\"o}lkopf, Bernhard and Lengauer, Thomas and M{\"u}ller, Klaus-Robert}, title = {Engineering support vector machine kernels that recognize translation initiation sites}, issn = {1367-4803}, year = {2000}, language = {en} } @article{ZieheMuellerNolteetal.2000, author = {Ziehe, Andreas and M{\"u}ller, Klaus-Robert and Nolte, G. and Mackert, B.-M. and Curio, Gabriel}, title = {Artifact reduction in magnetoneurography based on time-delayed second-order correlations}, year = {2000}, language = {en} } @article{ZieheKawanabeHarmeling2004, author = {Ziehe, Andreas and Kawanabe, Motoaki and Harmeling, Stefan}, title = {Blind separation of post-nonlinear mixtures using linearizing transformations and temporal decorrelation}, issn = {1532-4435}, year = {2004}, abstract = {We propose two methods that reduce the post-nonlinear blind source separation problem (PNL-BSS) to a linear BSS problem. The first method is based on the concept of maximal correlation: we apply the alternating conditional expectation (ACE) algorithm-a powerful technique from nonparametric statistics-to approximately invert the componentwise nonlinear functions. The second method is a Gaussianizing transformation, which is motivated by the fact that linearly mixed signals before nonlinear transformation are approximately Gaussian distributed. This heuristic, but simple and efficient procedure works as well as the ACE method. Using the framework provided by ACE, convergence can be proven. The optimal transformations obtained by ACE coincide with the sought-after inverse functions of the nonlinearities. After equalizing the nonlinearities, temporal decorrelation separation (TDSEP) allows us to recover the source signals. Numerical simulations testing "ACE-TD" and "Gauss-TD" on realistic examples are performed with excellent results.}, language = {en} } @phdthesis{Ziehe2005, author = {Ziehe, Andreas}, title = {Blind source separation based on joint diagonalization of matrices with applications in biomedical signal processing}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-5694}, school = {Universit{\"a}t Potsdam}, year = {2005}, abstract = {This thesis is concerned with the solution of the blind source separation problem (BSS). The BSS problem occurs frequently in various scientific and technical applications. In essence, it consists in separating meaningful underlying components out of a mixture of a multitude of superimposed signals. In the recent research literature there are two related approaches to the BSS problem: The first is known as Independent Component Analysis (ICA), where the goal is to transform the data such that the components become as independent as possible. The second is based on the notion of diagonality of certain characteristic matrices derived from the data. Here the goal is to transform the matrices such that they become as diagonal as possible. In this thesis we study the latter method of approximate joint diagonalization (AJD) to achieve a solution of the BSS problem. After an introduction to the general setting, the thesis provides an overview on particular choices for the set of target matrices that can be used for BSS by joint diagonalization. As the main contribution of the thesis, new algorithms for approximate joint diagonalization of several matrices with non-orthogonal transformations are developed.
These newly developed algorithms are tested on synthetic benchmark datasets and compared to previous diagonalization algorithms. Applications of the BSS methods to biomedical signal processing are discussed and exemplified with real-life data sets of multi-channel biomagnetic recordings.}, subject = {Signaltrennung}, language = {en} } @phdthesis{Zhou2008, author = {Zhou, Wei}, title = {Access control model and policies for collaborative environments}, address = {Potsdam}, pages = {199 S. : graph. Darst.}, year = {2008}, language = {en} } @article{ZenderMetzlerLucke2014, author = {Zender, Raphael and Metzler, Richard and Lucke, Ulrike}, title = {FreshUP-A pervasive educational game for freshmen}, series = {Pervasive and mobile computing}, volume = {14}, journal = {Pervasive and mobile computing}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1574-1192}, doi = {10.1016/j.pmcj.2013.09.003}, pages = {47 -- 56}, year = {2014}, abstract = {Students beginning their studies at university face manifold problems such as orientation in a new environment and organizing their courses. This article presents the implementation and successful empirical evaluation of the pervasive browser-based educational game "FreshUP", which aims at helping to overcome the initial difficulties of freshmen. In contrast to a conventional scavenger hunt, mobile pervasive games like FreshUP, bridging in-game and real world activities, have the potential to provide help in a motivating manner using new technology which is currently becoming more and more common. (C) 2013 Elsevier B.V. All rights reserved.}, language = {en} } @phdthesis{Zarezadeh2012, author = {Zarezadeh, Aliakbar}, title = {Distributed smart cameras : architecture and communication protocols}, address = {Potsdam}, pages = {135 S.}, year = {2012}, language = {en} } @phdthesis{Yang2013, author = {Yang, Haojin}, title = {Automatic video indexing and retrieval using video OCR technology}, address = {Potsdam}, pages = {182 S.}, year = {2013}, language = {en} } @misc{XenikoudakisAhmedHarrisetal.2020, author = {Xenikoudakis, Georgios and Ahmed, Mayeesha and Harris, Jacob Colt and Wadleigh, Rachel and Paijmans, Johanna L. A. and Hartmann, Stefanie and Barlow, Axel and Lerner, Heather and Hofreiter, Michael}, title = {Ancient DNA reveals twenty million years of aquatic life in beavers}, series = {Current biology : CB}, volume = {30}, journal = {Current biology : CB}, number = {3}, publisher = {Current Biology Ltd.}, address = {London}, issn = {0960-9822}, doi = {10.1016/j.cub.2019.12.041}, pages = {R110 -- R111}, year = {2020}, abstract = {Xenikoudakis et al. report a partial mitochondrial genome of the extinct giant beaver Castoroides and estimate the origin of aquatic behavior in beavers to approximately 20 million years. This time estimate coincides with the extinction of terrestrial beavers and raises the question whether the two events had a common cause.}, language = {en} } @article{WuebbelerZieheMackertetal.2000, author = {W{\"u}bbeler, G. and Ziehe, Andreas and Mackert, B.-M. and M{\"u}ller, Klaus-Robert and Trahms, L. and Curio, Gabriel}, title = {Independent component analysis of noninvasively recorded cortical magnetic DC-fields in humans}, year = {2000}, language = {en} } @phdthesis{Wust2015, author = {Wust, Johannes}, title = {Mixed workload management for in-memory databases}, pages = {VIII, 167}, year = {2015}, language = {en} } @book{WuKarriKuznetsovetal.2003, author = {Wu, K. and Karri, R.
and Kuznetsov, Grigory and G{\"o}ssel, Michael}, title = {Low Cost Concurrent Error Detection for the Advanced Encryption Standard}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2003, 8}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {10 S.}, year = {2003}, language = {en} } @article{WongBesnard2000, author = {Wong, P. and Besnard, Philippe}, title = {Paraconsistent reasoning as an analytical tool}, issn = {1367-0751}, year = {2000}, language = {en} } @phdthesis{Wolter2010, author = {Wolter, Christian}, title = {A methodology for model-driven process security}, address = {Potsdam}, pages = {xv, 144 S. : graph. Darst.}, year = {2010}, language = {en} } @phdthesis{Wist2011, author = {Wist, Dominic}, title = {Attacking complexity in logic synthesis of asynchronous circuits}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59706}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Most of the microelectronic circuits fabricated today are synchronous, i.e. they are driven by one or several clock signals. Synchronous circuit design faces several fundamental challenges such as high-speed clock distribution, integration of multiple cores operating at different clock rates, reduction of power consumption and dealing with voltage, temperature, manufacturing and runtime variations. Asynchronous or clockless design plays a key role in alleviating these challenges; however, the design and test of asynchronous circuits is much more difficult in comparison to their synchronous counterparts. A driving force for a widespread use of asynchronous technology is the availability of mature EDA (Electronic Design Automation) tools which provide an entire automated design flow starting from an HDL (Hardware Description Language) specification yielding the final circuit layout. Even though there was much progress in developing such EDA tools for asynchronous circuit design during the last two decades, their maturity level as well as their acceptance is still not comparable with tools for synchronous circuit design. In particular, logic synthesis (which implies the application of Boolean minimisation techniques) for the entire system's control path can significantly improve the efficiency of the resulting asynchronous implementation, e.g. in terms of chip area and performance. However, logic synthesis, in particular for asynchronous circuits, suffers from complexity problems. Signal Transition Graphs (STGs) are labelled Petri nets which are widely used to specify the interface behaviour of speed independent (SI) circuits - a robust subclass of asynchronous circuits. STG decomposition is a promising approach to tackle complexity problems like state space explosion in logic synthesis of SI circuits. The (structural) decomposition of STGs is guided by a partition of the output signals and generates a usually much smaller component STG for each partition member, i.e. a component STG with a much smaller state space than the initial specification. However, decomposition can result in component STGs that in isolation have so-called irreducible CSC conflicts (i.e. these components are not SI synthesisable anymore) even if the specification has none of them. A new approach is presented to avoid such conflicts by introducing internal communication between the components. So far, STG decompositions are guided by the finest output partitions, i.e.
one output per component. However, this might not yield optimal circuit implementations. Efficient heuristics are presented to determine coarser partitions leading to improved circuits in terms of chip area. For the new algorithms, correctness proofs are given and their implementations are incorporated into the decomposition tool DESIJ. The presented techniques are successfully applied to some benchmarks - including 'real-life' specifications arising in the context of control resynthesis - which delivered promising results.}, language = {en} } @article{WilligMitschke2006, author = {Willig, Andreas and Mitschke, Robert}, title = {Results of bit error measurements with sensor nodes and casuistic consequences for design of energy-efficient error control schemes}, isbn = {978-3-540-32158-3}, year = {2006}, abstract = {For the proper design of energy-efficient error control schemes some insight into channel error patterns is needed. This paper presents bit error and packet loss measurements taken with sensor nodes running the popular RFM}, language = {en} } @article{WilligMatheusWolisz2005, author = {Willig, Andreas and Matheus, K. and Wolisz, A.}, title = {Wireless technology in industrial networks}, issn = {0018-9219}, year = {2005}, abstract = {With the success of wireless technologies in consumer electronics, standard wireless technologies are envisioned for the deployment in industrial environments as well. Industrial applications involving mobile subsystems or just the desire to save cabling make wireless technologies attractive. Nevertheless, these applications often have stringent requirements on reliability and timing. In wired environments, timing and reliability are well catered for by fieldbus systems (which are a mature technology designed to enable communication between digital controllers and the sensors and actuators interfacing to a physical process). When wireless links are included, reliability and timing requirements are significantly more difficult to meet, due to the adverse properties of the radio channels. In this paper we thus discuss some key issues coming up in wireless fieldbus and wireless industrial communication systems: 1) fundamental problems like achieving timely and reliable transmission despite channel errors; 2) the usage of existing wireless technologies for this specific field of applications; and 3) the creation of hybrid systems in which wireless stations are included into existing wired systems}, language = {en} } @phdthesis{Wildner1997, author = {Wildner, Uwe}, title = {CASC : compiler assisted self-checking of structural integrity}, pages = {XIV, 134 S. : graph. Darst.}, year = {1997}, language = {en} } @article{Wildner1997, author = {Wildner, Uwe}, title = {Experimental evaluation of assigned signature checking with return address hashing on different platforms}, year = {1997}, language = {en} } @article{Wildner1996, author = {Wildner, Uwe}, title = {Compiler assisted self-checking of structural integrity using return address hashing}, year = {1996}, language = {en} } @article{WeskevanderAalstVerbeek2004, author = {Weske, Mathias and van der Aalst, Wil M. P. and Verbeek, H. M.
W.}, title = {Advances in business process management}, issn = {0169-023X}, year = {2004}, language = {en} } @book{Weske2007, author = {Weske, Mathias}, title = {Business Process Management : Concepts, Languages, Architectures}, publisher = {Springer-Verlag Berlin Heidelberg}, address = {Berlin, Heidelberg}, isbn = {978-3-540-73521-2}, doi = {10.1007/978-3-540-73522-9}, pages = {368 S.}, year = {2007}, language = {en} } @article{WeiherHirschfeld2014, author = {Weiher, Marcel and Hirschfeld, Robert}, title = {Polymorphic identifiers: uniform resource access in objective-smalltalk}, series = {ACM SIGPLAN notices}, volume = {49}, journal = {ACM SIGPLAN notices}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0362-1340}, doi = {10.1145/2508168.2508169}, pages = {61 -- 71}, year = {2014}, abstract = {In object-oriented programming, polymorphic dispatch of operations decouples clients from specific providers of services and allows implementations to be modified or substituted without affecting clients. The Uniform Access Principle (UAP) tries to extend these qualities to resource access by demanding that access to state be indistinguishable from access to operations. Despite language features supporting the UAP, the overall goal of substitutability has not been achieved for either alternative resources such as keyed storage, files or web pages, or for alternate access mechanisms: specific kinds of resources are bound to specific access mechanisms and vice versa. Changing storage or access patterns either requires changes to both clients and service providers, or trying to maintain the UAP imposes significant penalties in terms of code-duplication and/or performance overhead. We propose introducing first class identifiers as polymorphic names for storage locations to solve these problems. With these Polymorphic Identifiers, we show that we can provide uniform access to a wide variety of resource types as well as storage and access mechanisms, whether parametrized or direct, without affecting client code, without causing code duplication or significant performance penalties.}, language = {en} } @article{WeidlichMendlingWeske2011, author = {Weidlich, Matthias and Mendling, Jan and Weske, Mathias}, title = {Efficient consistency measurement based on behavioral profiles of process models}, series = {IEEE transactions on software engineering}, volume = {37}, journal = {IEEE transactions on software engineering}, number = {3}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {0098-5589}, doi = {10.1109/TSE.2010.96}, pages = {410 -- 429}, year = {2011}, abstract = {Engineering of process-driven business applications can be supported by process modeling efforts in order to bridge the gap between business requirements and system specifications. However, diverging purposes of business process modeling initiatives have led to significant problems in aligning related models at different abstraction levels and different perspectives. Checking the consistency of such corresponding models is a major challenge for process modeling theory and practice. In this paper, we take the inappropriateness of existing strict notions of behavioral equivalence as a starting point. Our contribution is a concept called behavioral profile that captures the essential behavioral constraints of a process model. We show that these profiles can be computed efficiently, i.e., in cubic time for sound free-choice Petri nets w.r.t.
their number of places and transitions. We use behavioral profiles for the definition of a formal notion of consistency which is less sensitive to model projections than common criteria of behavioral equivalence and allows for quantifying deviation in a metric way. The derivation of behavioral profiles and the calculation of a degree of consistency have been implemented to demonstrate the applicability of our approach. We also report the findings from checking consistency between partially overlapping models of the SAP reference model.}, language = {en} } @article{WegnerZenderLucke2015, author = {Wegner, Christian and Zender, Raphael and Lucke, Ulrike}, title = {ProtoSense}, series = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, journal = {KEYCIT 2014 - Key Competencies in Informatics and ICT}, number = {7}, publisher = {Universit{\"a}tsverlag Potsdam}, address = {Potsdam}, issn = {1868-0844}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-82970}, pages = {405 -- 407}, year = {2015}, language = {en} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expansion of the number of web sites and Internet users makes the WWW the most important global information resource. From information publishing and electronic commerce to entertainment and social networking, the Web allows an inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the high competition between similar services supplied by different sites, and the wide geographic distributions of the web users drive the urgent requirement from the web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement. "X" stands for two meanings: one is that the usage interest differs across various web sites, and the other is that usage interest is depicted from multiple aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" shows that our concentration is on locating and measuring the differences and changes among usage patterns. This thesis presents the methodologies on discovering usage interest on three kinds of web sites: the public information portal site, the e-learning site that provides various kinds of streaming lectures, and the social site that supplies public discussions on IT issues. On different sites, we concentrate on different issues related with mining usage interest. The educational information portal sites were the first implementation scenarios on discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures or graphs. However, a necessary requirement is to rebuild the individual behaviors from usage history. We give a systematic study on how to rebuild individual behaviors. Besides, this thesis shows a new strategy on building content clusters based on pair browsing retrieved from usage logs. The difference between such clusters and the original web structure displays the distance between the destinations from usage side and the expectations from design side.
Moreover, we study the problem of tracking the changes of usage patterns in their life cycles. The changes are described from the internal side, integrating conceptual and structural features, and from the external side for the physical features; they are also described from the local side, measuring the difference between two time spans, and from the global side, showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation and to track the changes of usage patterns. The e-learning site provides teaching materials such as slides, recorded lecture videos and exercise sheets. We focus on discovering the learning interest on streaming lectures, such as RealMedia, MP4 and Flash clips. Compared to the information portal site, the usage on streaming lectures encapsulates variables such as viewing time and actions during learning processes. The learning interest is discovered in the form of answering 6 questions, which covers finding the relations between pieces of lectures and the preference among different forms of lectures. We focus on detecting the changes of learning interest on the same course across different semesters. The differences in content and structure between two courses leverage the changes in learning interest. We give an algorithm for measuring the difference in learning interest, integrated with a similarity comparison between courses. A search engine, TASK-Moniminer, is created to help teachers query the learning interest on their streaming lectures on the tele-TASK site. The social site acts as an online community attracting web users to discuss common topics and share interesting information. Compared to the public information portal site and the e-learning web site, the rich interactions among users and web content bring a wider range of content quality and, on the other hand, provide more possibilities to express and model usage interest. We propose a framework on finding and recommending high reputation articles in a social site. We observed that the reputation is classified into global and local categories; the quality of the articles having high reputation is related to the content features.
Based on these observations, our framework is implemented by first finding the articles having global or local reputation, then clustering articles based on their content relations, and finally selecting and recommending articles from each cluster based on their reputation ranks.}, language = {en} } @article{WangZhouLin2000, author = {Wang, Kewen and Zhou, Lizhu and Lin, Fangzhen}, title = {Alternating fixpoint theory for logic programs with priority}, isbn = {3-540-67797-6}, year = {2000}, language = {en} } @article{WangZhou2001, author = {Wang, Kewen and Zhou, Lizhu}, title = {An extension to GCWA and query evaluation for disjunctive deductive databases}, year = {2001}, language = {en} } @article{Wang2001, author = {Wang, Kewen}, title = {A comparative study of disjunctive well-founded semantics}, isbn = {3-540-42593-4}, year = {2001}, language = {en} } @article{Wang2001, author = {Wang, Kewen}, title = {Disjunctive well-founded semantics revisited}, year = {2001}, language = {en} } @article{Wang2001, author = {Wang, Kewen}, title = {A top-down procedure for disjunctive well-founded semantics}, year = {2001}, language = {en} } @article{Wang2001, author = {Wang, Kewen}, title = {A top-down procedure for disjunctive well-founded semantics}, isbn = {3-540-42254-4}, year = {2001}, language = {en} } @article{Wang2000, author = {Wang, Kewen}, title = {Argumentation-based abduction in disjunctive logic programming}, year = {2000}, language = {en} } @article{WaltonGordon2015, author = {Walton, Douglas and Gordon, Thomas F.}, title = {Formalizing informal logic}, series = {Informal logic : reasoning and argumentation in theory and practice}, volume = {35}, journal = {Informal logic : reasoning and argumentation in theory and practice}, number = {4}, publisher = {Centre for Research in Reasoning, Argumentation and Rhetoric, University of Windsor}, address = {Windsor}, issn = {0824-2577}, pages = {508 -- 538}, year = {2015}, abstract = {In this paper we investigate the extent to which formal argumentation models can handle ten basic characteristics of informal logic identified in the informal logic literature. By showing how almost all of these characteristics can be successfully modelled formally, we claim that good progress can be made toward the project of formalizing informal logic. Of the formal argumentation models available, we chose the Carneades Argumentation System (CAS), a formal, computational model of argument that uses argument graphs as its basis, structures of a kind very familiar to practitioners of informal logic through their use of argument diagrams.}, language = {en} } @article{WaitelonisJuergesSack2019, author = {Waitelonis, J{\"o}rg and J{\"u}rges, Henrik and Sack, Harald}, title = {Remixing entity linking evaluation datasets for focused benchmarking}, series = {Semantic Web}, volume = {10}, journal = {Semantic Web}, number = {2}, publisher = {IOS Press}, address = {Amsterdam}, issn = {1570-0844}, doi = {10.3233/SW-180334}, pages = {385 -- 412}, year = {2019}, abstract = {In recent years, named entity linking (NEL) tools were primarily developed in terms of a general approach, whereas today numerous tools are focusing on specific domains such as the mapping of persons and organizations only, or the annotation of locations or events in microposts. However, the available benchmark datasets necessary for the evaluation of NEL tools do not reflect this focalizing trend.
We have analyzed the evaluation process applied in the NEL benchmarking framework GERBIL [in: Proceedings of the 24th International Conference on World Wide Web (WWW'15), International World Wide Web Conferences Steering Committee, Republic and Canton of Geneva, Switzerland, 2015, pp. 1133-1143, Semantic Web 9(5) (2018), 605-625] and all its benchmark datasets. Based on these insights we have extended the GERBIL framework to enable a more fine-grained evaluation and in-depth analysis of the available benchmark datasets with respect to different emphases. This paper presents the implementation of an adaptive filter for arbitrary entities and customized benchmark creation as well as the automated determination of typical NEL benchmark dataset properties, such as the extent of content-related ambiguity and diversity. These properties are integrated on different levels, which also enables tailoring customized new datasets out of the existing ones by remixing documents based on desired emphases. Besides a new system library to enrich provided NIF [in: International Semantic Web Conference (ISWC'13), Lecture Notes in Computer Science, Vol. 8219, Springer, Berlin, Heidelberg, 2013, pp. 98-113] datasets with statistical information, best practices for dataset remixing are presented, and an in-depth analysis of the performance of entity linking systems on special focus datasets is given.}, language = {en} } @book{Vogel2010, author = {Vogel, Tobias}, title = {Web service generation and data quality web services}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @book{Vogel2010, author = {Vogel, Thomas}, title = {Models at runtime for monitoring and adapting software systems}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @article{Vierheller2014, author = {Vierheller, Janine}, title = {Exploratory Data Analysis}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, editor = {Lambrecht, Anna-Lena and Margaria, Tiziana}, publisher = {Axel Springer Verlag}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {110 -- 126}, year = {2014}, abstract = {In bioinformatics the term exploratory data analysis refers to different methods to get an overview of large biological data sets. Hence, it helps to create a framework for further analysis and hypothesis testing. The workflow facilitates this first important step of the analysis of data created by high-throughput technologies. The results are different plots showing the structure of the measurements. The goal of the workflow is the automation of the exploratory data analysis, while flexibility should also be guaranteed. The basic tool is the free software R.}, language = {en} } @article{VidelaGuziolowskiEduatietal.2015, author = {Videla, Santiago and Guziolowski, Carito and Eduati, Federica and Thiele, Sven and Gebser, Martin and Nicolas, Jacques and Saez-Rodriguez, Julio and Schaub, Torsten H. and Siegel, Anne}, title = {Learning Boolean logic models of signaling networks with ASP}, series = {Theoretical computer science}, volume = {599}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2014.06.022}, pages = {79 -- 101}, year = {2015}, abstract = {Boolean networks provide a simple yet powerful qualitative modeling approach in systems biology.
However, manual identification of logic rules underlying the system being studied is in most cases out of reach. Therefore, automated inference of Boolean logical networks from experimental data is a fundamental question in this field. This paper addresses the problem consisting of learning, from a prior knowledge network describing causal interactions and phosphorylation activities at a pseudo-steady state, Boolean logic models of immediate-early response in signaling transduction networks. The underlying optimization problem has been so far addressed through mathematical programming approaches and the use of dedicated genetic algorithms. In a recent work we have shown severe limitations of stochastic approaches in this domain and proposed to use Answer Set Programming (ASP), considering a simpler problem setting. Herein, we extend our previous work in order to consider more realistic biological conditions including numerical datasets, the presence of feedback-loops in the prior knowledge network and the necessity of multi-objective optimization. In order to cope with such extensions, we propose several discretization schemes and elaborate upon our previous ASP encoding. Towards real-world biological data, we evaluate the performance of our approach over in silico numerical datasets based on a real and large-scale prior knowledge network. The correctness of our encoding and discretization schemes is dealt with in Appendices A-B. (C) 2014 Elsevier B.V. All rights reserved.}, language = {en} } @phdthesis{Videla2014, author = {Videla, Santiago}, title = {Reasoning on the response of logical signaling networks with answer set programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-71890}, school = {Universit{\"a}t Potsdam}, year = {2014}, abstract = {Deciphering the functioning of biological networks is one of the central tasks in systems biology. In particular, signal transduction networks are crucial for the understanding of the cellular response to external and internal perturbations. Importantly, in order to cope with the complexity of these networks, mathematical and computational modeling is required. We propose a computational modeling framework in order to achieve more robust discoveries in the context of logical signaling networks. More precisely, we focus on modeling the response of logical signaling networks by means of automated reasoning using Answer Set Programming (ASP). ASP provides a declarative language for modeling various knowledge representation and reasoning problems. Moreover, available ASP solvers provide several reasoning modes for assessing the multitude of answer sets. Therefore, leveraging its rich modeling language and its highly efficient solving capacities, we use ASP to address three challenging problems in the context of logical signaling networks: learning of (Boolean) logical networks, experimental design, and identification of intervention strategies. Overall, the contribution of this thesis is three-fold. Firstly, we introduce a mathematical framework for characterizing and reasoning on the response of logical signaling networks. Secondly, we contribute to a growing list of successful applications of ASP in systems biology.
Thirdly, we present software providing a complete pipeline for automated reasoning on the response of logical signaling networks.}, language = {en} } @article{vanHoolandVerborghDeWildeetal.2013, author = {van Hooland, Seth and Verborgh, Ruben and De Wilde, Max and Hercher, Johannes and Mannens, Erik and Van de Walle, Rik}, title = {Evaluating the success of vocabulary reconciliation for cultural heritage collections}, series = {Journal of the American Society for Information Science and Technology}, volume = {64}, journal = {Journal of the American Society for Information Science and Technology}, number = {3}, publisher = {Wiley-Blackwell}, address = {Hoboken}, issn = {1532-2882}, doi = {10.1002/asi.22763}, pages = {464 -- 479}, year = {2013}, abstract = {The concept of Linked Data has made its entrance in the cultural heritage sector due to its potential use for the integration of heterogeneous collections and deriving additional value out of existing metadata. However, practitioners and researchers alike need a better understanding of what outcome they can reasonably expect of the reconciliation process between their local metadata and established controlled vocabularies which are already a part of the Linked Data cloud. This paper offers an in-depth analysis of how a locally developed vocabulary can be successfully reconciled with the Library of Congress Subject Headings (LCSH) and the Arts and Architecture Thesaurus (AAT) through the help of a general-purpose tool for interactive data transformation (OpenRefine). Issues negatively affecting the reconciliation process are identified and solutions are proposed in order to derive maximum value from existing metadata and controlled vocabularies in an automated manner.}, language = {en} } @article{UflackerKowarkZeier2011, author = {Uflacker, Matthias and Kowark, Thomas and Zeier, Alexander}, title = {An instrument for real-time design interaction capture}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @phdthesis{Uflacker2010, author = {Uflacker, Matthias}, title = {Monitoring virtual team collaboration : methods, applications and experiences in engineering design}, address = {Potsdam}, pages = {203 S.}, year = {2010}, language = {en} } @article{Uflacker2010, author = {Uflacker, Matthias}, title = {Computational analysis of virtual team collaboration in the early stages of engineering design}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @book{TsudaSugiyamaMueller2000, author = {Tsuda, Koji and Sugiyama, Masashi and M{\"u}ller, Klaus-Robert}, title = {Subspace information criterion for non-quadratic regularizers : model selection for sparse regressors}, series = {GMD-Report}, volume = {120}, journal = {GMD-Report}, publisher = {GMD-Forschungszentrum Informationstechnik}, address = {Sankt Augustin}, pages = {36 S.}, year = {2000}, language = {en} } @book{Truemper2010, author = {Tr{\"u}mper, Jonas}, title = {Towards visualization of complex, service-based software systems}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @article{TroegerMerzky2014, author = {Troeger, Peter and Merzky, Andre}, title = {Towards standardized job submission and control in infrastructure clouds}, series = {Journal of grid computing}, volume = {12}, journal = {Journal of grid computing}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {1570-7873}, doi = {10.1007/s10723-013-9275-2}, pages = {111 -- 125}, year = {2014}, abstract = {The submission and management of computational jobs is a traditional
part of utility computing environments. End users and developers of domain-specific software abstractions often have to deal with the heterogeneity of such batch processing systems. This led to a number of application programming interface and job description standards in the past, which are implemented and established for cluster and Grid systems. With the recent rise of cloud computing as a new utility computing paradigm, the standardized access to batch processing facilities operated on cloud resources becomes an important issue. Furthermore, the design of such a standard has to consider a tradeoff between feature completeness and the achievable level of interoperability. The article discusses this general challenge, and presents some existing standards with traditional cluster and Grid computing background that may be applicable to cloud environments. We present OCCI-DRMAA as one approach for standardized access to batch processing facilities hosted in a cloud.}, language = {en} } @misc{Trapp2007, type = {Master Thesis}, author = {Trapp, Matthias}, title = {Analysis and exploration of virtual 3D city models using 3D information lenses}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-13930}, school = {Universit{\"a}t Potsdam}, year = {2007}, abstract = {This thesis addresses real-time rendering techniques for 3D information lenses based on the focus \& context metaphor. It analyzes, conceives, implements, and reviews its applicability to objects and structures of virtual 3D city models. In contrast to digital terrain models, the application of focus \& context visualization to virtual 3D city models is barely researched. However, the purposeful visualization of contextual data is of extreme importance for the interactive exploration and analysis of this field. Programmable hardware enables the implementation of new lens techniques that allow the augmentation of the perceptive and cognitive quality of the visualization compared to classical perspective projections. A set of 3D information lenses is integrated into a 3D scene-graph system: • Occlusion lenses modify the appearance of virtual 3D city model objects to resolve their occlusion and consequently facilitate the navigation. • Best-view lenses display city model objects in a priority-based manner and mediate their meta information. Thus, they support exploration and navigation of virtual 3D city models. • Color and deformation lenses modify the appearance and geometry of 3D city models to facilitate their perception. The presented techniques for 3D information lenses and their application to virtual 3D city models clarify their potential for interactive visualization and form a base for further development.}, language = {en} } @phdthesis{Trapp2013, author = {Trapp, Matthias}, title = {Interactive rendering techniques for focus+context visualization of 3D geovirtual environments}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-66824}, school = {Universit{\"a}t Potsdam}, year = {2013}, abstract = {This thesis introduces a collection of new real-time rendering techniques and applications for focus+context visualization of interactive 3D geovirtual environments such as virtual 3D city and landscape models. These environments are generally characterized by a large number of objects and are of high complexity with respect to geometry and textures. For these reasons, their interactive 3D rendering represents a major challenge.
Their 3D depiction implies a number of weaknesses such as occlusions, cluttered image contents, and partial screen-space usage. To overcome these limitations and, thus, to facilitate the effective communication of geo-information, principles of focus+context visualization can be used for the design of real-time 3D rendering techniques for 3D geovirtual environments (see Figure). In general, detailed views of a 3D geovirtual environment are combined seamlessly with abstracted views of the context within a single image. To perform the real-time image synthesis required for interactive visualization, dedicated parallel processors (GPUs) for rasterization of computer graphics primitives are used. For this purpose, the design and implementation of appropriate data structures and rendering pipelines are necessary. The contribution of this work comprises the following five real-time rendering methods: • The rendering technique for 3D generalization lenses enables the combination of different 3D city geometries (e.g., generalized versions of a 3D city model) in a single image in real time. The method is based on a generalized and fragment-precise clipping approach, which uses a compressible, raster-based data structure. It enables the combination of detailed views in the focus area with the representation of abstracted variants in the context area. • The rendering technique for the interactive visualization of dynamic raster data in 3D geovirtual environments facilitates the rendering of 2D surface lenses. It enables a flexible combination of different raster layers (e.g., aerial images or videos) using projective texturing for decoupling image and geometry data. Thus, various overlapping and nested 2D surface lenses of different contents can be visualized interactively. • The interactive rendering technique for image-based deformation of 3D geovirtual environments enables the real-time image synthesis of non-planar projections, such as cylindrical and spherical projections, as well as multi-focal 3D fisheye-lenses and the combination of planar and non-planar projections. • The rendering technique for view-dependent multi-perspective views of 3D geovirtual environments, based on the application of global deformations to the 3D scene geometry, can be used for synthesizing interactive panorama maps to combine detailed views close to the camera (focus) with abstract views in the background (context). This approach reduces occlusions, increases the usage of the available screen space, and reduces the overload of image contents. • The object-based and image-based rendering techniques for highlighting objects and focus areas inside and outside the view frustum facilitate preattentive perception.
The concepts and implementations of interactive image synthesis for focus+context visualization and their selected applications enable a more effective communication of spatial information, and provide building blocks for design and development of new applications and systems in the field of 3D geovirtual environments.}, language = {en} } @article{TranPontelliBalduccinietal.2022, author = {Tran, Son Cao and Pontelli, Enrico and Balduccini, Marcello and Schaub, Torsten}, title = {Answer set planning}, series = {Theory and practice of logic programming}, journal = {Theory and practice of logic programming}, publisher = {Cambridge University Press}, address = {New York}, issn = {1471-0684}, doi = {10.1017/S1471068422000072}, pages = {73}, year = {2022}, abstract = {Answer Set Planning refers to the use of Answer Set Programming (ASP) to compute plans, that is, solutions to planning problems, that transform a given state of the world to another state. The development of efficient and scalable answer set solvers has provided a significant boost to the development of ASP-based planning systems. This paper surveys the progress made during the last two and a half decades in the area of answer set planning, from its foundations to its use in challenging planning domains. The survey explores the advantages and disadvantages of answer set planning. It also discusses typical applications of answer set planning and presents a set of challenges for future research.}, language = {en} } @article{TiwariPrakashGrossetal.2020, author = {Tiwari, Abhishek and Prakash, Jyoti and Groß, Sascha and Hammer, Christian}, title = {A large scale analysis of Android}, series = {The journal of systems and software}, volume = {170}, journal = {The journal of systems and software}, publisher = {Elsevier}, address = {New York}, issn = {0164-1212}, doi = {10.1016/j.jss.2020.110775}, pages = {17}, year = {2020}, abstract = {Many Android applications embed webpages via WebView components and execute JavaScript code within Android. Hybrid applications leverage dedicated APIs to load a resource and render it in a WebView. Furthermore, Android objects can be shared with the JavaScript world. However, bridging the interfaces of the Android and JavaScript world might also incur severe security threats: Potentially untrusted webpages and their JavaScript might interfere with the Android environment and its access to native features. No general analysis is currently available to assess the implications of such hybrid apps bridging the two worlds. To understand the semantics and effects of hybrid apps, we perform a large-scale study on the usage of the hybridization APIs in the wild. We analyze and categorize the parameters to hybridization APIs for 7,500 randomly selected and the 196 most popular applications from the Google Playstore as well as 1000 malware samples. Our results advance the general understanding of hybrid applications, as well as implications for potential program analyses, and the current security situation: We discovered thousands of flows of sensitive data from Android to JavaScript, the vast majority of which could flow to potentially untrustworthy code. Our analysis identified numerous web pages embedding vulnerabilities, which we exemplarily exploited. 
Additionally, we discovered a multitude of applications in which potentially untrusted JavaScript code may interfere with (trusted) Android objects, both in benign and malign applications.}, language = {en} } @phdthesis{Tiwari2019, author = {Tiwari, Abhishek}, title = {Enhancing Users' Privacy: Static Resolution of the Dynamic Properties of Android}, school = {Universit{\"a}t Potsdam}, pages = {xiii, 111}, year = {2019}, abstract = {The usage of mobile devices is rapidly growing with Android being the most prevalent mobile operating system. Thanks to the vast variety of mobile applications, users prefer smartphones over desktops for day-to-day tasks like Internet surfing. Consequently, smartphones store a plenitude of sensitive data. This data, together with the high value of smartphones, makes them an attractive target for device/data theft (thieves/malicious applications). Unfortunately, state-of-the-art anti-theft solutions do not work if they do not have an active network connection, e.g., if the SIM card was removed from the device. In the majority of these cases, device owners permanently lose their smartphone together with their personal data, which is even worse. Apart from that, malevolent applications perform malicious activities to steal sensitive information from smartphones. Recent research considered static program analysis to detect dangerous data leaks. These analyses work well for data leaks due to inter-component communication, but suffer from shortcomings for inter-app communication with respect to precision, soundness, and scalability. This thesis focuses on enhancing users' privacy on Android against physical device loss/theft and (un)intentional data leaks. It presents three novel frameworks: (1) ThiefTrap, an anti-theft framework for Android, (2) IIFA, a modular inter-app intent information flow analysis of Android applications, and (3) PIAnalyzer, a precise approach for PendingIntent vulnerability analysis. ThiefTrap is based on a novel concept of an anti-theft honeypot account that protects the owner's data while preventing a thief from resetting the device. We implemented the proposed scheme and evaluated it through an empirical user study with 35 participants. In this study, the owner's data could be protected, recovered, and anti-theft functionality could be performed unnoticed by the thief in all cases. IIFA proposes a novel approach for Android's inter-component/inter-app communication (ICC/IAC) analysis. Our main contribution is the first fully automatic, sound, and precise ICC/IAC information flow analysis that is scalable for realistic apps due to modularity, avoiding combinatorial explosion: Our approach determines communicating apps using short summaries rather than inlining intent calls between components and apps, which requires simultaneously analyzing all apps installed on a device. We evaluate IIFA in terms of precision, recall, and demonstrate its scalability to a large corpus of real-world apps. IIFA reports 62 problematic ICC-/IAC-related information flows via two or more apps/components. PIAnalyzer proposes a novel approach to analyze PendingIntent related vulnerabilities. PendingIntents are a powerful and universal feature of Android for inter-component communication.
We empirically evaluate PIAnalyzer on a set of 1000 randomly selected applications and find 1358 insecure usages of PendingIntents, including 70 severe vulnerabilities.}, language = {en} } @article{ThonLandwehrDeRaedt2011, author = {Thon, Ingo and Landwehr, Niels and De Raedt, Luc}, title = {Stochastic relational processes: efficient inference and applications}, series = {Machine learning}, volume = {82}, journal = {Machine learning}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-010-5213-8}, pages = {239 -- 272}, year = {2011}, abstract = {One of the goals of artificial intelligence is to develop agents that learn and act in complex environments. Realistic environments typically feature a variable number of objects, relations amongst them, and non-deterministic transition behavior. While standard probabilistic sequence models provide efficient inference and learning techniques for sequential data, they typically cannot fully capture the relational complexity. On the other hand, statistical relational learning techniques are often too inefficient to cope with complex sequential data. In this paper, we introduce a simple model that occupies an intermediate position in this expressiveness/efficiency trade-off. It is based on CP-logic (Causal Probabilistic Logic), an expressive probabilistic logic for modeling causality. However, by specializing CP-logic to represent a probability distribution over sequences of relational state descriptions and employing a Markov assumption, inference and learning become more tractable and effective. Specifically, we show how to solve part of the inference and learning problems directly at the first-order level, while transforming the remaining part into the problem of computing all satisfying assignments for a Boolean formula in a binary decision diagram.
We experimentally validate that the resulting technique is able to handle probabilistic relational domains with a substantial number of objects and relations.}, language = {en} } @article{Thomas2010, author = {Thomas, Ivonne}, title = {Reliable digital identities for SOA and the Web}, isbn = {978-3-86956-036-6}, year = {2010}, language = {en} } @article{ThienenNoweskiRauthetal.2012, author = {Thienen, Julia von and Noweski, Christine and Rauth, Ingo and Meinel, Christoph and Lange, Sabine}, title = {If you want to know who you are, tell me where you are : the importance of places}, year = {2012}, language = {en} } @article{ThienenNoweskiMeineletal.2011, author = {Thienen, Julia von and Noweski, Christine and Meinel, Christoph and Rauth, Ingo}, title = {The co-evolution of theory and practice in design thinking - or - "Mind the oddness trap!"}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @article{ThienenNoweskiMeineletal.2012, author = {Thienen, Julia von and Noweski, Christine and Meinel, Christoph and Lang, Sabine and Nicolai, Claudia and Bartz, Andreas}, title = {What can design thinking learn from behavior group therapy?}, isbn = {978-3-642-31990-7}, year = {2012}, language = {en} } @article{ThielscherSchaub1995, author = {Thielscher, Michael and Schaub, Torsten H.}, title = {Default reasoning by deductive planning}, year = {1995}, language = {en} } @phdthesis{Thiele2011, author = {Thiele, Sven}, title = {Modeling biological systems with Answer Set Programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59383}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Biology has made great progress in identifying and measuring the building blocks of life. The availability of high-throughput methods in molecular biology has dramatically accelerated the growth of biological knowledge for various organisms. The advancements in genomic, proteomic and metabolomic technologies allow for constructing complex models of biological systems. An increasing number of biological repositories is available on the web, incorporating thousands of biochemical reactions and genetic regulations. Systems Biology is a recent research trend in life science, which fosters a systemic view on biology. In Systems Biology one is interested in integrating the knowledge from all these different sources into models that capture the interaction of these entities. By studying these models one wants to understand the emerging properties of the whole system, such as robustness. However, both measurements as well as biological networks are prone to considerable incompleteness, heterogeneity and mutual inconsistency, which makes it highly non-trivial to draw biologically meaningful conclusions in an automated way. Therefore, we want to promote Answer Set Programming (ASP) as a tool for discrete modeling in Systems Biology. ASP is a declarative problem solving paradigm, in which a problem is encoded as a logic program such that its answer sets represent solutions to the problem. ASP has intrinsic features to cope with incompleteness, offers a rich modeling language and highly efficient solving technology. We present ASP solutions for the analysis of genetic regulatory networks, determining consistency with observed measurements and identifying minimal causes for inconsistency. We extend this approach for computing minimal repairs on model and data that restore consistency. This method allows for predicting unobserved data even in case of inconsistency.
Further, we present an ASP approach to metabolic network expansion. This approach exploits the easy characterization of reachability in ASP and its various reasoning methods to explore the biosynthetic capabilities of metabolic reaction networks and generate hypotheses for extending the network. Finally, we present the BioASP library, a Python library which encapsulates our ASP solutions into the imperative programming paradigm. The library allows for an easy integration of ASP solutions into system-rich environments, as they exist in Systems Biology.}, language = {en} } @article{Teske2014, author = {Teske, Daniel}, title = {Geocoder accuracy ranking}, series = {Process design for natural scientists: an agile model-driven approach}, journal = {Process design for natural scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {161 -- 174}, year = {2014}, abstract = {Finding an address on a map is sometimes tricky: the chosen map application may be unfamiliar with the enclosed region. There are several geocoders on the market; they have different databases and algorithms to compute the query. Consequently, the geocoding results differ in their quality. Fortunately, the geocoders provide a rich set of metadata. The workflow described in this paper compares this metadata with the aim to find out which geocoder is offering the best-fitting coordinate for a given address.}, language = {en} } @book{Tepoyan2008, author = {Tepoyan, Liparit}, title = {The mixed problem for a degenerate operator equation}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, publisher = {Univ.}, address = {Potsdam}, issn = {1437-739X}, pages = {13 S.}, year = {2008}, language = {en} } @book{Tepoyan2004, author = {Tepoyan, Liparit}, title = {The Neumann problem for a degenerate operator equation}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, publisher = {Univ.}, address = {Potsdam}, issn = {1437-739X}, pages = {11 S.}, year = {2004}, language = {en} } @book{Tepoyan2000, author = {Tepoyan, Liparit}, title = {Degenerated operator equations of higher order}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, publisher = {Univ.}, address = {Potsdam}, issn = {1437-739X}, pages = {13 S.}, year = {2000}, language = {en} } @article{TavakoliAlirezazadehHedayatipouretal.2021, author = {Tavakoli, Hamad and Alirezazadeh, Pendar and Hedayatipour, Ava and Nasib, A. H.
Banijamali and Landwehr, Niels}, title = {Leaf image-based classification of some common bean cultivars using discriminative convolutional neural networks}, series = {Computers and electronics in agriculture : COMPAG online ; an international journal}, volume = {181}, journal = {Computers and electronics in agriculture : COMPAG online ; an international journal}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0168-1699}, doi = {10.1016/j.compag.2020.105935}, pages = {11}, year = {2021}, abstract = {In recent years, many efforts have been made to apply image processing techniques for plant leaf identification. However, categorizing leaf images at the cultivar/variety level, because of the very low inter-class variability, is still a challenging task. In this research, we propose an automatic discriminative method based on convolutional neural networks (CNNs) for classifying 12 different cultivars of common beans that belong to three species. We show that employing advanced loss functions, such as Additive Angular Margin Loss and Large Margin Cosine Loss, instead of the standard softmax loss function for the classification can yield better discrimination between classes and thereby mitigate the problem of low inter-class variability. The method was evaluated by classifying species (level I), cultivars from the same species (level II), and cultivars from different species (level III), based on images from the leaf foreside and backside. The results indicate that the performance of the classification algorithm on the leaf backside image dataset is superior. The maximum mean classification accuracies of 95.86, 91.37 and 86.87\% were obtained at levels I, II and III, respectively. The proposed method outperforms previous related work and provides a reliable approach for plant cultivar identification.}, language = {en} } @article{Tarnick1995a, author = {Tarnick, Steffen}, title = {Controllable self-checking checkers for conditional concurrent checking}, year = {1995}, language = {en} } @phdthesis{Tarnick1995b, author = {Tarnick, Steffen}, title = {Data compression techniques for concurrent error detection and built-in self test}, pages = {159 S. : Ill.}, year = {1995}, language = {en} } @article{Tarnick1994a, author = {Tarnick, Steffen}, title = {Bounding error masking in linear output space compression schemes}, year = {1994}, language = {en} } @article{Tarnick1994b, author = {Tarnick, Steffen}, title = {Controllable self-checking checkers for conditional concurrent checking}, year = {1994}, language = {en} } @book{Tarkhanov2004, author = {Tarkhanov, Nikolai Nikolaevich}, title = {Harmonic integrals on domains with edges}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Mathematik, Arbeitsgruppe Partiell}, publisher = {Univ.}, address = {Potsdam}, issn = {1437-739X}, pages = {41 S.}, year = {2004}, language = {en} } @article{SugiyamaKawanabeMueller2004, author = {Sugiyama, Masashi and Kawanabe, Motoaki and M{\"u}ller, Klaus-Robert}, title = {Trading variance reduction with unbiasedness : the regularized subspace information criterion for robust model selection in kernel regression}, issn = {0899-7667}, year = {2004}, abstract = {A well-known result by Stein (1956) shows that in particular situations, biased estimators can yield better parameter estimates than their generally preferred unbiased counterparts. 
This letter follows the same spirit, as we will stabilize the unbiased generalization error estimates by regularization and finally obtain more robust model selection criteria for learning. We trade a small bias against a larger variance reduction, which has the beneficial effect of being more precise on a single training set. We focus on the subspace information criterion (SIC), which is an unbiased estimator of the expected generalization error measured by the reproducing kernel Hilbert space norm. SIC can be applied to kernel regression, and it was shown in earlier experiments that a small regularization of SIC has a stabilization effect. However, it remained open how to appropriately determine the degree of regularization in SIC. In this article, we derive an unbiased estimator of the expected squared error between SIC and the expected generalization error, and propose determining the degree of regularization of SIC such that this estimator of the expected squared error is minimized. Computer simulations with artificial and real data sets illustrate that the proposed method works effectively for improving the precision of SIC, especially in the high-noise-level cases. We furthermore compare the proposed method to the original SIC, cross-validation, and an empirical Bayesian method in ridge parameter selection, with good results.}, language = {en} } @misc{Strickroth2019, author = {Strickroth, Sven}, title = {PLATON}, series = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, journal = {Postprints der Universit{\"a}t Potsdam : Mathematisch-Naturwissenschaftliche Reihe}, number = {804}, issn = {1866-8372}, doi = {10.25932/publishup-44188}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-441887}, pages = {28}, year = {2019}, abstract = {Lesson planning is both an important and demanding task—especially as part of teacher training. This paper presents the requirements for a lesson planning system and evaluates existing systems regarding these requirements. One major drawback of existing software tools is that most are limited to a text- or form-based representation of the lesson designs. In this article, a new approach with a graphical, time-based representation with (automatic) analysis methods is proposed, and the system architecture and domain model are described in detail. The approach is implemented in an interactive, web-based prototype called PLATON, which additionally supports the management of lessons in units as well as the modelling of teacher and student-generated resources. The prototype was evaluated in a study with 61 prospective teachers (bachelor's and master's preservice teachers as well as teacher trainees in post-university teacher training) in Berlin, Germany, with a focus on usability. The results show that this approach proved usable for lesson planning and offers positive effects for the perception of time and self-reflection.}, language = {en} } @article{Strickroth2019a, author = {Strickroth, Sven}, title = {PLATON}, series = {Education Sciences}, volume = {9}, journal = {Education Sciences}, number = {4}, publisher = {MDPI}, address = {Basel}, issn = {2227-7102}, doi = {10.3390/educsci9040254}, pages = {26}, year = {2019}, abstract = {Lesson planning is both an important and demanding task—especially as part of teacher training. This paper presents the requirements for a lesson planning system and evaluates existing systems regarding these requirements. 
One major drawback of existing software tools is that most are limited to a text- or form-based representation of the lesson designs. In this article, a new approach with a graphical, time-based representation with (automatic) analysis methods is proposed, and the system architecture and domain model are described in detail. The approach is implemented in an interactive, web-based prototype called PLATON, which additionally supports the management of lessons in units as well as the modelling of teacher and student-generated resources. The prototype was evaluated in a study with 61 prospective teachers (bachelor's and master's preservice teachers as well as teacher trainees in post-university teacher training) in Berlin, Germany, with a focus on usability. The results show that this approach proved usable for lesson planning and offers positive effects for the perception of time and self-reflection.}, language = {en} } @article{StoffelKunzGerber1997, author = {Stoffel, Dominik and Kunz, Wolfgang and Gerber, Stefan}, title = {And/Or reasoning graphs for determining prime implicants in multi-level combinational networks}, year = {1997}, language = {en} } @article{StoffelKunz1997, author = {Stoffel, Dominik and Kunz, Wolfgang}, title = {Record \& play : a structural fixed point iteration for sequential circuit verification}, isbn = {0-8186-8200-0}, year = {1997}, language = {en} } @book{StoffelKunz1997a, author = {Stoffel, Dominik and Kunz, Wolfgang}, title = {Structural FSM traversal : theory and a practical algorithm}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1997, 05}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ. Potsdam}, address = {Potsdam}, issn = {0946-7580}, pages = {34 S. : Ill.}, year = {1997}, language = {en} } @article{StoffelKunz1996, author = {Stoffel, Dominik and Kunz, Wolfgang}, title = {Logic equivalence checking by optimization techniques}, year = {1996}, language = {en} } @article{SteuerHumburgSelbig2006, author = {Steuer, Ralf and Humburg, Peter and Selbig, Joachim}, title = {Validation and functional annotation of expression-based clusters based on gene ontology}, series = {BMC bioinformatics}, volume = {7}, journal = {BMC bioinformatics}, number = {380}, publisher = {BioMed Central}, address = {London}, issn = {1471-2105}, doi = {10.1186/1471-2105-7-380}, pages = {12}, year = {2006}, abstract = {Background: The biological interpretation of large-scale gene expression data is one of the paramount challenges in current bioinformatics. In particular, placing the results in the context of other available functional genomics data, such as existing bio-ontologies, has already provided substantial improvement for detecting and categorizing genes of interest. One common approach is to look for functional annotations that are significantly enriched within a group or cluster of genes, as compared to a reference group. Results: In this work, we suggest the information-theoretic concept of mutual information to investigate the relationship between groups of genes, as given by data-driven clustering, and their respective functional categories. Drawing upon related approaches (Gibbons and Roth, Genome Research 12: 1574-1581, 2002), we seek to quantify to what extent individual attributes are sufficient to characterize a given group or cluster of genes. 
Conclusion: We show that the mutual information provides a systematic framework to assess the relationship between groups or clusters of genes and their functional annotations in a quantitative way. Within this framework, the mutual information allows us to address and incorporate several important issues, such as the interdependence of functional annotations and combinatorial combinations of attributes. It thus supplements and extends the conventional search for overrepresented attributes within a group or cluster of genes. In particular, by taking combinations of attributes into account, the mutual information opens the way to uncover specific functional descriptions of a group of genes or a clustering result. All datasets and functional annotations used in this study are publicly available. All scripts used in the analysis are provided as additional files.}, language = {en} } @article{SteinertStabernack2022, author = {Steinert, Fritjof and Stabernack, Benno}, title = {Architecture of a low latency H.264/AVC video codec for robust ML based image classification : how region of interests can minimize the impact of coding artifacts}, series = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, volume = {94}, journal = {Journal of Signal Processing Systems for Signal, Image, and Video Technology}, number = {7}, publisher = {Springer}, address = {New York}, issn = {1939-8018}, doi = {10.1007/s11265-021-01727-2}, pages = {693 -- 708}, year = {2022}, abstract = {The use of neural networks is considered the state of the art in the field of image classification. A large number of different networks are available for this purpose, which, appropriately trained, permit a high level of classification accuracy. Typically, these networks are applied to uncompressed image data, since a corresponding training was also carried out using image data of similarly high quality. However, if image data contains image errors, the classification accuracy deteriorates drastically. This applies in particular to coding artifacts which occur due to image and video compression. Typical application scenarios for video compression are narrowband transmission channels for which video coding is required but a subsequent classification is to be carried out on the receiver side. In this paper, we present a special H.264/Advanced Video Codec (AVC) based video codec that allows certain regions of a picture to be coded with near constant picture quality in order to allow a reliable classification using neural networks, whereas the remaining image will be coded using a constant bit rate. We have combined this feature with the ability to run with lowest latency properties, which is usually also required in remote control application scenarios. 
The codec has been implemented as a fully hardwired High Definition video capable hardware architecture, which is suitable for Field Programmable Gate Arrays.}, language = {en} } @article{SteinertHirschfeld2012, author = {Steinert, Bastian and Hirschfeld, Robert}, title = {Applying design knowledge to programming}, year = {2012}, language = {en} } @article{SteinertCassouHirschfeld2013, author = {Steinert, Bastian and Cassou, Damien and Hirschfeld, Robert}, title = {CoExist : overcoming aversion to change - preserving immediate access to source code and run-time information of previous development states}, series = {ACM SIGPLAN notices}, volume = {48}, journal = {ACM SIGPLAN notices}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0362-1340}, doi = {10.1145/2480360.2384591}, pages = {107 -- 117}, year = {2013}, abstract = {Programmers make many changes to the program to eventually find a good solution for a given task. In this course of change, every intermediate development state can be of value when, for example, a promising idea suddenly turns out inappropriate or the interplay of objects turns out more complex than initially expected before making changes. Programmers would benefit from tool support that provides immediate access to source code and run-time information of previous development states of interest. We present IDE extensions, implemented for Squeak/Smalltalk, to preserve, retrieve, and work with this information. With such tool support, programmers can work without worries because they can rely on tools that help them with whatever their explorations will reveal. They no longer have to follow certain best practices only to avoid undesired consequences of changing code.}, language = {en} } @article{Stede2020, author = {Stede, Manfred}, title = {From connectives to coherence relations}, series = {Revue roumaine de linguistique : RRL = Romanian review of linguistics}, volume = {65}, journal = {Revue roumaine de linguistique : RRL = Romanian review of linguistics}, number = {3}, publisher = {Ed. Academiei Rom{\^a}ne}, address = {Bucure{\c{s}}ti}, issn = {0035-3957}, pages = {213 -- 233}, year = {2020}, abstract = {The notion of coherence relations is quite widely accepted in general, but concrete proposals differ considerably on the questions of how they should be motivated, which relations are to be assumed, and how they should be defined. This paper takes a "bottom-up" perspective by assessing the contribution made by linguistic signals (connectives), using insights from the relevant literature as well as verification by practical text annotation. We work primarily with the German language here and focus on the realm of contrast. Thus, we suggest a new inventory of contrastive connective functions and discuss their relationship to contrastive coherence relations that have been proposed in earlier work.}, language = {en} } @article{SrinivasanSenthilkumarMohamedetal.2012, author = {Srinivasan, K. and Senthilkumar, D. V. and Mohamed, I. Raja and Murali, K. and Lakshmanan, M. 
and Kurths, J{\"u}rgen}, title = {Anticipating, complete and lag synchronizations in RC phase-shift network based coupled Chua's circuits without delay}, series = {Chaos : an interdisciplinary journal of nonlinear science}, volume = {22}, journal = {Chaos : an interdisciplinary journal of nonlinear science}, number = {2}, publisher = {American Institute of Physics}, address = {Melville}, issn = {1054-1500}, doi = {10.1063/1.4711375}, pages = {8}, year = {2012}, abstract = {We construct a new RC phase shift network based Chua's circuit, which exhibits a period-doubling bifurcation route to chaos. Using coupled versions of such a phase-shift network based Chua's oscillators, we describe a new method for achieving complete synchronization (CS), approximate lag synchronization (LS), and approximate anticipating synchronization (AS) without delay or parameter mismatch. Employing the Pecora and Carroll approach, chaos synchronization is achieved in coupled chaotic oscillators, where the drive system variables control the response system. As a result, AS or LS or CS is demonstrated without using a variable delay line both experimentally and numerically.}, language = {en} } @article{Sommerfeld1994, author = {Sommerfeld, Erdmute}, title = {Operations on cognitive structures : their modelling on the basis of graph theory}, year = {1994}, language = {en} } @article{SogomonyanSinghGoessel1998, author = {Sogomonyan, Egor S. and Singh, Adit D. and G{\"o}ssel, Michael}, title = {A scan based concrrent BIST approach for low cost on-line testing}, year = {1998}, language = {en} } @article{SogomonyanSinghGoessel1999, author = {Sogomonyan, Egor S. and Singh, Adit D. and G{\"o}ssel, Michael}, title = {A multi-mode scannable memory element for high test application efficiency and delay testing}, year = {1999}, language = {en} } @article{SogomonyanSinghGoessel1998, author = {Sogomonyan, Egor S. and Singh, Adit D. and G{\"o}ssel, Michael}, title = {A multi-mode scannable memory element for high test application efficiency and delay testing}, year = {1998}, language = {en} } @book{SogomonyanMarienfeldOcheretnijetal.2003, author = {Sogomonyan, Egor S. and Marienfeld, Daniel and Ocheretnij, V. and G{\"o}ssel, Michael}, title = {A new self-checking sum-bit duplicated carry-select adder}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {2003, 5}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {10 S.}, year = {2003}, language = {en} } @article{SogomonyanGoessel1995, author = {Sogomonyan, Egor S. and G{\"o}ssel, Michael}, title = {A new parity preserving multi-input signature analyser}, year = {1995}, language = {en} } @article{SogomonyanGoessel1996, author = {Sogomonyan, Egor S. and G{\"o}ssel, Michael}, title = {Concurrently self-testing embedded checkers for ultra-reliable fault-tolerant systems}, year = {1996}, language = {en} } @phdthesis{Smirnov2011, author = {Smirnov, Sergey}, title = {Business process model abstraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60258}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business process models are used within a range of organizational initiatives, where every stakeholder has a unique perspective on a process and demands the respective model. As a consequence, multiple process models capturing the very same business process coexist. 
Keeping such models in sync is a challenge within an ever-changing business environment: once a process is changed, all its models have to be updated. Due to a large number of models and their complex relations, model maintenance becomes error-prone and expensive. Against this background, business process model abstraction emerged as an operation reducing the number of stored process models and facilitating model management. Business process model abstraction is an operation preserving essential process properties and leaving out insignificant details in order to retain information relevant for a particular purpose. Process model abstraction has been addressed by several researchers. The focus of their studies has been on particular use cases and model transformations supporting these use cases. This thesis systematically approaches the problem of business process model abstraction, shaping the outcome into a framework. We investigate the current industry demand in abstraction, summarizing it in a catalog of business process model abstraction use cases. The thesis focuses on one prominent use case where the user demands a model with coarse-grained activities and overall process ordering constraints. We develop model transformations that support this use case, starting with transformations based on process model structure analysis. Further, abstraction methods considering the semantics of process model elements are investigated. First, we suggest how semantically related activities can be discovered in process models, a barely researched challenge. The thesis validates the designed abstraction methods against sets of industrial process models and discusses the method implementation aspects. Second, we develop a novel model transformation, which, combined with the related activity discovery, allows flexible non-hierarchical abstraction. In this way, this thesis advocates novel model transformations that facilitate business process model management and provides the foundations for innovative tool support.}, language = {en} } @article{SinghSogomonyanGoesseletal.1999, author = {Singh, Adit D. and Sogomonyan, Egor S. and G{\"o}ssel, Michael and Seuring, Markus}, title = {Testability evaluation of sequential designs incorporating the multi-mode scannable memory element}, year = {1999}, language = {en} } @article{ShenoyKrauledatBlankertzetal.2006, author = {Shenoy, Pradeep and Krauledat, Matthias and Blankertz, Benjamin and Rao, Rajesh P. N. and M{\"u}ller, Klaus-Robert}, title = {Towards adaptive classification for BCI}, doi = {10.1088/1741-2560/3/1/R02}, year = {2006}, abstract = {Non-stationarities are ubiquitous in EEG signals. They are especially apparent in the use of EEG-based brain-computer interfaces (BCIs): (a) in the differences between the initial calibration measurement and the online operation of a BCI, or (b) caused by changes in the subject's brain processes during an experiment (e.g. due to fatigue, change of task involvement, etc.). In this paper, we quantify for the first time such systematic evidence of statistical differences in data recorded during offline and online sessions. Furthermore, we propose novel techniques of investigating and visualizing data distributions, which are particularly useful for the analysis of (non-) stationarities. Our study shows that the brain signals used for control can change substantially from the offline calibration sessions to online control, and also within a single session. 
In addition to this general characterization of the signals, we propose several adaptive classification schemes and study their performance on data recorded during online experiments. An encouraging result of our study is that surprisingly simple adaptive methods in combination with an offline feature selection scheme can significantly increase BCI performance.}, language = {en} } @article{SeuringGoesselSogomonyan1998, author = {Seuring, Markus and G{\"o}ssel, Michael and Sogomonyan, Egor S.}, title = {A structural approach for space compaction for concurrent checking and BIST}, year = {1998}, language = {en} } @book{SeuringGoesselSogomonyan1997, author = {Seuring, Markus and G{\"o}ssel, Michael and Sogomonyan, Egor S.}, title = {A structural approach for space compaction for concurrent checking and BIST}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1997, 01}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ. Potsdam}, address = {Potsdam [u.a.]}, issn = {0946-7580}, pages = {19 S. : Ill.}, year = {1997}, language = {en} } @article{SeuringGoessel1999, author = {Seuring, Markus and G{\"o}ssel, Michael}, title = {A structural approach for space compaction for sequential circuits}, year = {1999}, language = {en} } @article{SeuringGoessel1999a, author = {Seuring, Markus and G{\"o}ssel, Michael}, title = {A structural method for output compaction of sequential automata implemented as circuits}, year = {1999}, language = {en} } @book{SeuringGoessel1998, author = {Seuring, Markus and G{\"o}ssel, Michael}, title = {A structural approach for space compaction for sequential circuits}, series = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, volume = {1998, 05}, journal = {Preprint / Universit{\"a}t Potsdam, Institut f{\"u}r Informatik}, publisher = {Univ.}, address = {Potsdam}, issn = {0946-7580}, pages = {16 Bl. : graph. Darst.}, year = {1998}, language = {en} } @article{Seuring1999, author = {Seuring, Markus}, title = {Built-in self test mit multi-mode scannable memory elementen}, year = {1999}, language = {en} } @phdthesis{Seuring2000, author = {Seuring, Markus}, title = {Output space compaction for testing and concurrent checking}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-0000165}, school = {Universit{\"a}t Potsdam}, year = {2000}, abstract = {This dissertation describes new design methods for compactors for the outputs of digital circuits that drastically reduce the number of outputs to be tested while degrading the testability of the circuits only slightly or not at all. The first part of the thesis deals with methods for combinational circuits that take the structure of the circuits into account when designing the compactors. Several algorithms for analyzing circuit structures are presented and investigated for the first time. The complexity of the presented methods for generating compactors is linear in the number of gates in the circuit, so they are applicable to very large circuits. The second part describes such a method for sequential circuits for the first time. This method essentially builds on the first one. The third part describes a design method that requires no information about the internal structure of the circuit or about the underlying fault model. 
The design is based solely on a given set of test vectors and the corresponding test responses of the fault-free circuit. A compactor generated by this method masks none of the faults that are observable at the outputs of the circuit when testing with the given vectors.}, language = {en} } @article{Sens2014, author = {Sens, Henriette}, title = {Web-based map generalization tools put to the test: a jABC workflow}, series = {Process Design for Natural Scientists: an agile model-driven approach}, journal = {Process Design for Natural Scientists: an agile model-driven approach}, number = {500}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-45005-5}, issn = {1865-0929}, pages = {175 -- 185}, year = {2014}, abstract = {Geometric generalization is a fundamental concept in the digital mapping process. An increasing amount of spatial data is provided on the web, as well as a range of tools to process it. This jABC workflow is used for the automatic testing of web-based generalization services like mapshaper.org by executing their functionality, overlaying both datasets before and after the transformation, and displaying them visually in a .tif file. Mostly Web Services and command line tools are used to build an environment where ESRI shapefiles can be uploaded, processed through a chosen generalization service and finally visualized in Irfanview.}, language = {en} }