@phdthesis{Wist2011, author = {Wist, Dominic}, title = {Attacking complexity in logic synthesis of asynchronous circuits}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59706}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Most of the microelectronic circuits fabricated today are synchronous, i.e. they are driven by one or several clock signals. Synchronous circuit design faces several fundamental challenges such as high-speed clock distribution, integration of multiple cores operating at different clock rates, reduction of power consumption and dealing with voltage, temperature, manufacturing and runtime variations. Asynchronous or clockless design plays a key role in alleviating these challenges; however, the design and test of asynchronous circuits are much more difficult in comparison to their synchronous counterparts. A driving force for a widespread use of asynchronous technology is the availability of mature EDA (Electronic Design Automation) tools which provide an entire automated design flow starting from an HDL (Hardware Description Language) specification and yielding the final circuit layout. Even though there has been much progress in developing such EDA tools for asynchronous circuit design during the last two decades, their maturity level as well as their acceptance is still not comparable with tools for synchronous circuit design. In particular, logic synthesis (which implies the application of Boolean minimisation techniques) for the entire system's control path can significantly improve the efficiency of the resulting asynchronous implementation, e.g. in terms of chip area and performance. However, logic synthesis, in particular for asynchronous circuits, suffers from complexity problems. Signal Transition Graphs (STGs) are labelled Petri nets which are widely used to specify the interface behaviour of speed-independent (SI) circuits, a robust subclass of asynchronous circuits. STG decomposition is a promising approach to tackle complexity problems like state space explosion in logic synthesis of SI circuits. The (structural) decomposition of STGs is guided by a partition of the output signals and generates a usually much smaller component STG for each partition member, i.e. a component STG with a much smaller state space than the initial specification. However, decomposition can result in component STGs that in isolation have so-called irreducible CSC conflicts (i.e. these components are no longer SI-synthesisable) even if the specification has none of them. A new approach is presented to avoid such conflicts by introducing internal communication between the components. So far, STG decompositions have been guided by the finest output partitions, i.e. one output per component. However, this might not yield optimal circuit implementations. Efficient heuristics are presented to determine coarser partitions leading to improved circuits in terms of chip area. For the new algorithms, correctness proofs are given and their implementations are incorporated into the decomposition tool DESIJ. 
The presented techniques are successfully applied to some benchmarks - including 'real-life' specifications arising in the context of control resynthesis - delivering promising results.}, language = {en} } @article{WeidlichMendlingWeske2011, author = {Weidlich, Matthias and Mendling, Jan and Weske, Mathias}, title = {Efficient consistency measurement based on behavioral profiles of process models}, series = {IEEE transactions on software engineering}, volume = {37}, journal = {IEEE transactions on software engineering}, number = {3}, publisher = {Inst. of Electr. and Electronics Engineers}, address = {Los Alamitos}, issn = {0098-5589}, doi = {10.1109/TSE.2010.96}, pages = {410 -- 429}, year = {2011}, abstract = {Engineering of process-driven business applications can be supported by process modeling efforts in order to bridge the gap between business requirements and system specifications. However, diverging purposes of business process modeling initiatives have led to significant problems in aligning related models at different abstraction levels and from different perspectives. Checking the consistency of such corresponding models is a major challenge for process modeling theory and practice. In this paper, we take the inappropriateness of existing strict notions of behavioral equivalence as a starting point. Our contribution is a concept called behavioral profile that captures the essential behavioral constraints of a process model. We show that these profiles can be computed efficiently, i.e., in cubic time for sound free-choice Petri nets w.r.t. their number of places and transitions. We use behavioral profiles for the definition of a formal notion of consistency which is less sensitive to model projections than common criteria of behavioral equivalence and allows for quantifying deviation in a metric way. The derivation of behavioral profiles and the calculation of a degree of consistency have been implemented to demonstrate the applicability of our approach. We also report the findings from checking consistency between partially overlapping models of the SAP reference model.}, language = {en} } @phdthesis{Wang2011, author = {Wang, Long}, title = {X-tracking the usage interest on web sites}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-51077}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {The exponential expansion of the number of web sites and Internet users makes the WWW the most important global information resource. From information publishing and electronic commerce to entertainment and social networking, the Web allows inexpensive and efficient access to the services provided by individuals and institutions. The basic units for distributing these services are the web sites scattered throughout the world. However, the extreme fragility of web services and content, the high competition between similar services supplied by different sites, and the wide geographic distribution of web users create an urgent requirement for web managers to track and understand the usage interest of their web customers. This thesis, "X-tracking the Usage Interest on Web Sites", aims to fulfill this requirement. "X" carries two meanings: one is that usage interest differs across various web sites, and the other is that usage interest is depicted from multiple aspects: internal and external, structural and conceptual, objective and subjective. "Tracking" indicates that our focus is on locating and measuring the differences and changes among usage patterns. 
This thesis presents methodologies for discovering usage interest on three kinds of web sites: the public information portal site, the e-learning site that provides various kinds of streaming lectures, and the social site that hosts public discussions on IT issues. On different sites, we concentrate on different issues related to mining usage interest. The educational information portal sites were the first implementation scenarios for discovering usage patterns and optimizing the organization of web services. In such cases, the usage patterns are modeled as frequent page sets, navigation paths, navigation structures or graphs. However, a necessary prerequisite is to rebuild the individual behaviors from the usage history. We give a systematic study of how to rebuild individual behaviors. In addition, this thesis shows a new strategy for building content clusters based on pair browsing retrieved from usage logs. The difference between such clusters and the original web structure reveals the distance between the destinations on the usage side and the expectations on the design side. Moreover, we study the problem of tracking the changes of usage patterns over their life cycles. The changes are described from the internal side, integrating conceptual and structural features, and from the external side for the physical features; they are also described from the local side, measuring the difference between two time spans, and from the global side, showing the change tendency along the life cycle. A platform, Web-Cares, is developed to discover the usage interest, to measure the difference between usage interest and site expectation and to track the changes of usage patterns. The e-learning site provides teaching materials such as slides, recorded lecture videos and exercise sheets. We focus on discovering the learning interest in streaming lectures, such as RealMedia, MP4 and Flash clips. Compared to the information portal site, the usage of streaming lectures encapsulates variables such as viewing time and actions during the learning process. The learning interest is discovered by answering six questions, which cover finding the relations between pieces of lectures and the preferences among different forms of lectures. We concentrate on detecting the changes of learning interest in the same course across different semesters. The differences in content and structure between two courses drive the changes in learning interest. We give an algorithm for measuring the difference in learning interest, integrated with a similarity comparison between courses. A search engine, TASK-Moniminer, is created to help teachers query the learning interest in their streaming lectures on the tele-TASK site. The social site acts as an online community attracting web users to discuss common topics and share information of interest. Compared to the public information portal site and the e-learning site, the rich interactions among users and web content bring a wider range of content quality and, on the other hand, provide more possibilities to express and model usage interest. We propose a framework for finding and recommending high-reputation articles in a social site. We observed that reputation is classified into global and local categories, and that the quality of articles having high reputation is related to their content features. 
Based on these observations, our framework first finds the articles having global or local reputation, then clusters articles based on their content relations, and finally selects and recommends articles from each cluster based on their reputation ranks.}, language = {en} } @article{UflackerKowarkZeier2011, author = {Uflacker, Matthias and Kowark, Thomas and Zeier, Alexander}, title = {An instrument for real-time design interaction capture}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @article{ThonLandwehrDeRaedt2011, author = {Thon, Ingo and Landwehr, Niels and De Raedt, Luc}, title = {Stochastic relational processes: efficient inference and applications}, series = {Machine learning}, volume = {82}, journal = {Machine learning}, number = {2}, publisher = {Springer}, address = {Dordrecht}, issn = {0885-6125}, doi = {10.1007/s10994-010-5213-8}, pages = {239 -- 272}, year = {2011}, abstract = {One of the goals of artificial intelligence is to develop agents that learn and act in complex environments. Realistic environments typically feature a variable number of objects, relations amongst them, and non-deterministic transition behavior. While standard probabilistic sequence models provide efficient inference and learning techniques for sequential data, they typically cannot fully capture the relational complexity. On the other hand, statistical relational learning techniques are often too inefficient to cope with complex sequential data. In this paper, we introduce a simple model that occupies an intermediate position in this expressiveness/efficiency trade-off. It is based on CP-logic (Causal Probabilistic Logic), an expressive probabilistic logic for modeling causality. However, by specializing CP-logic to represent a probability distribution over sequences of relational state descriptions and employing a Markov assumption, inference and learning become more tractable and effective. Specifically, we show how to solve part of the inference and learning problems directly at the first-order level, while transforming the remaining part into the problem of computing all satisfying assignments for a Boolean formula in a binary decision diagram. We experimentally validate that the resulting technique is able to handle probabilistic relational domains with a substantial number of objects and relations.}, language = {en} } @article{ThienenNoweskiMeineletal.2011, author = {Thienen, Julia von and Noweski, Christine and Meinel, Christoph and Rauth, Ingo}, title = {The co-evolution of theory and practice in design thinking - or - "Mind the oddness trap!"}, isbn = {978-3-642-13756-3}, year = {2011}, language = {en} } @phdthesis{Thiele2011, author = {Thiele, Sven}, title = {Modeling biological systems with Answer Set Programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-59383}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Biology has made great progress in identifying and measuring the building blocks of life. The availability of high-throughput methods in molecular biology has dramatically accelerated the growth of biological knowledge for various organisms. The advancements in genomic, proteomic and metabolomic technologies allow for constructing complex models of biological systems. An increasing number of biological repositories are available on the web, incorporating thousands of biochemical reactions and genetic regulations. 
Systems Biology is a recent research trend in life science, which fosters a systemic view on biology. In Systems Biology one is interested in integrating the knowledge from all these different sources into models that capture the interaction of these entities. By studying these models one wants to understand the emerging properties of the whole system, such as robustness. However, both measurements and biological networks are prone to considerable incompleteness, heterogeneity and mutual inconsistency, which makes it highly non-trivial to draw biologically meaningful conclusions in an automated way. Therefore, we want to promote Answer Set Programming (ASP) as a tool for discrete modeling in Systems Biology. ASP is a declarative problem solving paradigm, in which a problem is encoded as a logic program such that its answer sets represent solutions to the problem. ASP has intrinsic features to cope with incompleteness, offers a rich modeling language and highly efficient solving technology. We present ASP solutions for the analysis of genetic regulatory networks, determining consistency with observed measurements and identifying minimal causes of inconsistency. We extend this approach to computing minimal repairs on model and data that restore consistency. This method allows for predicting unobserved data even in case of inconsistency. Further, we present an ASP approach to metabolic network expansion. This approach exploits the easy characterization of reachability in ASP and its various reasoning methods to explore the biosynthetic capabilities of metabolic reaction networks and to generate hypotheses for extending the network. Finally, we present the BioASP library, a Python library which encapsulates our ASP solutions into the imperative programming paradigm. The library allows for an easy integration of ASP solutions into rich software environments, as they exist in Systems Biology.}, language = {en} } @phdthesis{Smirnov2011, author = {Smirnov, Sergey}, title = {Business process model abstraction}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-60258}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Business process models are used within a range of organizational initiatives, where every stakeholder has a unique perspective on a process and demands the respective model. As a consequence, multiple process models capturing the very same business process coexist. Keeping such models in sync is a challenge within an ever-changing business environment: once a process is changed, all its models have to be updated. Due to a large number of models and their complex relations, model maintenance becomes error-prone and expensive. Against this background, business process model abstraction emerged as an operation reducing the number of stored process models and facilitating model management. Business process model abstraction is an operation preserving essential process properties and leaving out insignificant details in order to retain information relevant for a particular purpose. Process model abstraction has been addressed by several researchers. The focus of their studies has been on particular use cases and model transformations supporting these use cases. This thesis systematically approaches the problem of business process model abstraction, shaping the outcome into a framework. We investigate the current industry demand for abstraction, summarizing it in a catalog of business process model abstraction use cases. 
The thesis focuses on one prominent use case where the user demands a model with coarse-grained activities and overall process ordering constraints. We develop model transformations that support this use case, starting with transformations based on process model structure analysis. Further, abstraction methods considering the semantics of process model elements are investigated. First, we suggest how semantically related activities can be discovered in process models, a barely researched challenge. The thesis validates the designed abstraction methods against sets of industrial process models and discusses the method implementation aspects. Second, we develop a novel model transformation which, combined with the related activity discovery, allows flexible non-hierarchical abstraction. In this way, this thesis advocates novel model transformations that facilitate business process model management and provides the foundations for innovative tool support.}, language = {en} } @phdthesis{Schuenemann2011, author = {Sch{\"u}nemann, Bj{\"o}rn}, title = {The V2X simulation runtime infrastructure: VSimRTI}, address = {Potsdam}, pages = {163 S.}, year = {2011}, language = {en} } @book{SchubertSchwill2011, author = {Schubert, Sigrid and Schwill, Andreas}, title = {Didaktik der Informatik}, publisher = {Spektrum Akademischer Verlag}, address = {Heidelberg}, isbn = {978-3-8274-2652-9}, pages = {417 S.}, year = {2011}, language = {de} } @phdthesis{Roschke2011, author = {Roschke, Sebastian}, title = {Towards high quality security event correlation using in-memory and multi-core processing}, address = {Potsdam}, pages = {131 S.}, year = {2011}, language = {en} } @unpublished{RosamondBardohlDiehletal.2011, author = {Rosamond, Frances and Bardohl, Roswitha and Diehl, Stephan and Geisler, Uwe and Bolduan, Gordon and Lessmoellmann, Annette and Schwill, Andreas and Stege, Ulrike}, title = {Reaching out to the media: become a computer science ambassador}, series = {Communications of the ACM / Association for Computing Machinery}, volume = {54}, journal = {Communications of the ACM / Association for Computing Machinery}, number = {3}, publisher = {Association for Computing Machinery}, address = {New York}, issn = {0001-0782}, doi = {10.1145/1897852.1897880}, pages = {113 -- 116}, year = {2011}, language = {en} } @phdthesis{Richter2011, author = {Richter, Michael}, title = {Anwendung nichtlinearer Codes zur Fehlererkennung und -korrektur}, address = {Potsdam}, pages = {134 S.}, year = {2011}, language = {de} } @article{RabenaltGoesselLeininger2011, author = {Rabenalt, Thomas and Goessel, Michael and Leininger, Andreas}, title = {Masking of X-Values by use of a hierarchically configurable register}, series = {Journal of electronic testing : theory and applications}, volume = {27}, journal = {Journal of electronic testing : theory and applications}, number = {1}, publisher = {Springer}, address = {Dordrecht}, issn = {0923-8174}, doi = {10.1007/s10836-010-5179-2}, pages = {31 -- 41}, year = {2011}, abstract = {In this paper we consider masking of unknowns (X-values) for VLSI circuits. We present a new hierarchical method of X-masking which is a major improvement of the method proposed in [4], called WIDE1. With the proposed method, the number of observable scan cells is optimized and the data volume for X-masking can be significantly reduced in comparison to WIDE1. This is demonstrated for three industrial designs. 
In cases where all X-values have to be masked, the novel approach is especially efficient.}, language = {en} } @phdthesis{Rabenalt2011, author = {Rabenalt, Thomas}, title = {Datenkompaktierung f{\"u}r Diagnose und Test}, address = {Potsdam}, pages = {116 S.}, year = {2011}, language = {de} } @phdthesis{Quasthoff2011, author = {Quasthoff, Matthias}, title = {Effizientes Entwickeln von Semantic-Web-Software mit Object Triple Mapping}, address = {Potsdam}, pages = {138 S.}, year = {2011}, language = {de} } @article{PolyvyanyyWeidlichWeske2011, author = {Polyvyanyy, Artem and Weidlich, Matthias and Weske, Mathias}, title = {Connectivity of workflow nets: the foundations of stepwise verification}, series = {Acta informatica}, volume = {48}, journal = {Acta informatica}, number = {4}, publisher = {Springer}, address = {New York}, issn = {0001-5903}, doi = {10.1007/s00236-011-0137-8}, pages = {213 -- 242}, year = {2011}, abstract = {Behavioral models capture operational principles of real-world or designed systems. Formally, each behavioral model defines the state space of a system, i.e., its states and the principles of state transitions. Such a model is the basis for analysis of the system's properties. In practice, state spaces of systems are immense, which results in huge computational complexity for their analysis. Behavioral models are typically described as executable graphs, whose execution semantics encodes a state space. The structure theory of behavioral models studies the relations between the structure of a model and the properties of its state space. In this article, we use the connectivity property of graphs to achieve an efficient and extensive discovery of the compositional structure of behavioral models; behavioral models are stepwise decomposed into components with clear structural characteristics and inter-component relations. At each decomposition step, the discovered compositional structure of a model is used for reasoning on properties of the whole state space of the system. The approach is exemplified by means of a concrete behavioral model and verification criterion. That is, we analyze workflow nets, a well-established tool for modeling behavior of distributed systems, with respect to the soundness property, a basic correctness property of workflow nets. Stepwise verification allows the detection of violations of the soundness property by inspecting small portions of a model, thereby considerably reducing the amount of work to be done to perform soundness checks. Besides formal results, we also report on findings from applying our approach to an industry model collection.}, language = {en} } @book{PlattnerZeier2011, author = {Plattner, Hasso and Zeier, Alexander}, title = {In-memory data management : an inflection point for enterprise applications}, publisher = {Springer}, address = {Heidelberg, New York}, isbn = {978-3-642-19362-0}, pages = {236 S.}, year = {2011}, language = {en} } @misc{PatilHaiderPopeetal.2011, author = {Patil, Kaustubh R. and Haider, Peter and Pope, Phillip B. and Turnbaugh, Peter J. and Morrison, Mark and Scheffer, Tobias and McHardy, Alice C.}, title = {Taxonomic metagenome sequence assignment with structured output models}, series = {Nature methods : techniques for life scientists and chemists}, volume = {8}, journal = {Nature methods : techniques for life scientists and chemists}, number = {3}, publisher = {Nature Publ. Group},
address = {London}, issn = {1548-7091}, doi = {10.1038/nmeth0311-191}, pages = {191 -- 192}, year = {2011}, language = {en} } @phdthesis{Off2011, author = {Off, Thomas}, title = {Durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Softwareentwicklung von E-Government-Anwendungen : ein ontologiebasierter und modellgetriebener Ansatz am Beispiel von B{\"u}rgerdiensten}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-57478}, school = {Universit{\"a}t Potsdam}, year = {2011}, abstract = {Die {\"o}ffentliche Verwaltung setzt seit mehreren Jahren E-Government-Anwendungssysteme ein, um ihre Verwaltungsprozesse intensiver mit moderner Informationstechnik zu unterst{\"u}tzen. Da die {\"o}ffentliche Verwaltung in ihrem Handeln in besonderem Maße an Recht und Gesetz gebunden ist, verst{\"a}rkt und verbreitet sich der Zusammenhang zwischen den Gesetzen und Rechtsvorschriften einerseits und der zur Aufgabenunterst{\"u}tzung eingesetzten Informationstechnik andererseits. Aus Sicht der Softwaretechnik handelt es sich bei diesem Zusammenhang um eine spezielle Form der Verfolgbarkeit von Anforderungen (engl. Traceability), die so genannte Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (Pre-Requirements Specification Traceability, kurz Pre-RS Traceability), da sie Aspekte betrifft, die relevant sind, bevor die Anforderungen in eine Spezifikation eingeflossen sind (Urspr{\"u}nge von Anforderungen). Der Ansatz dieser Arbeit leistet einen Beitrag zur Verfolgbarkeit im Vorfeld der Anforderungsspezifikation von E-Government-Anwendungssystemen. Er kombiniert dazu aktuelle Entwicklungen und Standards (insbesondere des World Wide Web Consortium und der Object Management Group) aus den Bereichen Verfolgbarkeit von Anforderungen, Semantic Web, Ontologiesprachen und modellgetriebener Softwareentwicklung. Der L{\"o}sungsansatz umfasst eine spezielle Ontologie des Verwaltungshandelns, die mit den Techniken, Methoden und Werkzeugen des Semantic Web eingesetzt wird, um in Texten von Rechtsvorschriften relevante Urspr{\"u}nge von Anforderungen durch Annotationen mit einer definierten Semantik zu versehen. Darauf aufbauend wird das Ontology Definition Metamodel (ODM) verwendet, um die Annotationen als spezielle Individuen einer Ontologie auf Elemente der Unified Modeling Language (UML) abzubilden. Dadurch entsteht ein neuer Modelltyp Pre-Requirements Model (PRM), der das Vorfeld der Anforderungsspezifikation formalisiert. Modelle dieses Typs k{\"o}nnen auch verwendet werden, um Aspekte zu formalisieren, die sich nicht oder nicht vollst{\"a}ndig aus dem Text der Rechtsvorschrift ergeben. Weiterhin bietet das Modell die M{\"o}glichkeit zum Anschluss an die modellgetriebene Softwareentwicklung. In der Arbeit wird deshalb eine Erweiterung der Model Driven Architecture (MDA) vorgeschlagen. Zus{\"a}tzlich zu den etablierten Modelltypen Computation Independent Model (CIM), Platform Independent Model (PIM) und Platform Specific Model (PSM) k{\"o}nnte der Einsatz des PRM Vorteile f{\"u}r die Verfolgbarkeit bringen. Wird die MDA mit dem PRM auf das Vorfeld der Anforderungsspezifikation ausgeweitet, kann eine Transformation des PRM in ein CIM als initiale Anforderungsspezifikation erfolgen, indem der MOF Query View Transformation Standard (QVT) eingesetzt wird. Als Teil des QVT-Standards ist die Aufzeichnung von Verfolgbarkeitsinformationen bei Modelltransformationen verbindlich. 
Um die semantische L{\"u}cke zwischen PRM und CIM zu {\"u}berbr{\"u}cken, erfolgt analog zum Einsatz des Plattformmodells (PM) in der PIM-nach-PSM-Transformation der Einsatz spezieller Hilfsmodelle. Es kommen daf{\"u}r die im Projekt "E-LoGo" an der Universit{\"a}t Potsdam entwickelten Referenzmodelle zum Einsatz. Durch die Aufzeichnung der Abbildung annotierter Textelemente auf Elemente im PRM und der Transformation der Elemente des PRM in Elemente des CIM kann durchg{\"a}ngige Verfolgbarkeit im Vorfeld der Anforderungsspezifikation erreicht werden. Der Ansatz basiert auf einer so genannten Verfolgbarkeitsdokumentation in Form verlinkter Hypertextdokumente, die mittels XSL-Stylesheet erzeugt wurden und eine Verbindung zur graphischen Darstellung des Diagramms (z. B. Anwendungsfall-, Klassendiagramm der UML) haben. Der Ansatz unterst{\"u}tzt die horizontale Verfolgbarkeit zwischen Elementen unterschiedlicher Modelle umfassend, sowohl vorw{\"a}rts- als auch r{\"u}ckw{\"a}rtsgerichtet. Er bietet außerdem vertikale Verfolgbarkeit, die Elemente des gleichen Modells und verschiedener Modellversionen in Beziehung setzt. {\"U}ber den offensichtlichen Nutzen einer durchg{\"a}ngigen Verfolgbarkeit im Vorfeld der Anforderungsspezifikation (z. B. Analyse der Auswirkungen einer Gesetzes{\"a}nderung, Ber{\"u}cksichtigung des vollst{\"a}ndigen Kontextes einer Anforderung bei ihrer Priorisierung) hinausgehend, bietet diese Arbeit eine erste Ansatzm{\"o}glichkeit f{\"u}r eine Feedback-Schleife im Prozess der Gesetzgebung. Stehen beispielsweise mehrere gleichwertige Gestaltungsoptionen eines Gesetzes zur Auswahl, k{\"o}nnen die Auswirkungen jeder Option analysiert und der Aufwand ihrer Umsetzung in E-Government-Anwendungen als Auswahlkriterium ber{\"u}cksichtigt werden. Die am 16. M{\"a}rz 2011 in Kraft getretene {\"A}nderung des NKRG schreibt eine solche Analyse des so genannten „Erf{\"u}llungsaufwands" f{\"u}r Teilbereiche des Verwaltungshandelns bereits heute verbindlich vor. F{\"u}r diese Analyse kann die vorliegende Arbeit einen Ansatz bieten, um zu fundierten Aussagen {\"u}ber den {\"A}nderungsaufwand eingesetzter E-Government-Anwendungssysteme zu kommen.}, language = {de} }