@article{FriedrichKoetzingKrejcaetal.2016, author = {Friedrich, Tobias and K{\"o}tzing, Timo and Krejca, Martin Stefan and Sutton, Andrew M.}, title = {Robustness of Ant Colony Optimization to Noise}, series = {Evolutionary computation}, volume = {24}, journal = {Evolutionary computation}, publisher = {MIT Press}, address = {Cambridge}, issn = {1063-6560}, doi = {10.1162/EVCO_a_00178}, pages = {237 -- 254}, year = {2016}, abstract = {Recently, ant colony optimization (ACO) algorithms have proven to be efficient in uncertain environments, such as noisy or dynamically changing fitness functions. Most of these analyses have focused on combinatorial problems such as path finding. We rigorously analyze an ACO algorithm optimizing linear pseudo-Boolean functions under additive posterior noise. We study noise distributions whose tails decay exponentially fast, including the classical case of additive Gaussian noise. Without noise, the classical $(\mu+1)$ EA outperforms any ACO algorithm, with smaller $\mu$ being better; however, in the case of large noise, the $(\mu+1)$ EA fails, even for high values of $\mu$ (which are known to help against small noise). In this article, we show that ACO is able to deal with arbitrarily large noise in a graceful manner; that is, as long as the evaporation factor $\rho$ is small enough, depending on the variance $\sigma^2$ of the noise and the dimension $n$ of the search space, optimization will be successful. We also briefly consider the case of prior noise and prove that ACO can also efficiently optimize linear functions under this noise model.}, language = {en} } @article{KoetzingKrejca2019, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-hitting times under drift}, series = {Theoretical computer science}, volume = {796}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2019.08.021}, pages = {51 -- 69}, year = {2019}, abstract = {For the last ten years, almost every theoretical result concerning the expected run time of a randomized search heuristic used drift theory, making it arguably the most important tool in this domain. Its success is due to its ease of use and its powerful result: drift theory allows the user to derive bounds on the expected first-hitting time of a random process by bounding expected local changes of the process - the drift. This is usually far easier than bounding the expected first-hitting time directly. Due to the widespread use of drift theory, it is of utmost importance to have the best drift theorems possible. We improve the fundamental additive, multiplicative, and variable drift theorems by stating them in a form as general as possible and providing examples of why the restrictions we keep are still necessary. Our additive drift theorem for upper bounds only requires the process to be lower-bounded, that is, we remove unnecessary restrictions like a finite, discrete, or bounded state space. As corollaries, the same is true for our upper bounds in the case of variable and multiplicative drift. By bounding the step size of the process, we derive new lower-bounding multiplicative and variable drift theorems.
Last, we also state theorems that are applicable when the process has a drift of 0, by using a drift on the variance of the process.}, language = {en} } @article{FriedrichKoetzingKrejca2019, author = {Friedrich, Tobias and K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {Unbiasedness of estimation-of-distribution algorithms}, series = {Theoretical computer science}, volume = {785}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2018.11.001}, pages = {46 -- 59}, year = {2019}, abstract = {In the context of black-box optimization, black-box complexity is used for understanding the inherent difficulty of a given optimization problem. Central to our understanding of nature-inspired search heuristics in this context is the notion of unbiasedness. Specialized black-box complexities have been developed in order to better understand the limitations of these heuristics - especially of (population-based) evolutionary algorithms (EAs). In contrast to this, we focus on a model for algorithms explicitly maintaining a probability distribution over the search space: so-called estimation-of-distribution algorithms (EDAs). We consider the recently introduced $n$-Bernoulli-$\lambda$-EDA framework, which subsumes, for example, the commonly known EDAs PBIL, UMDA, $\lambda$-MMAS$_{\mathrm{IB}}$, and cGA. We show that an $n$-Bernoulli-$\lambda$-EDA is unbiased if and only if its probability distribution satisfies a certain invariance property under isometric automorphisms of $[0,1]^n$. By restricting how an $n$-Bernoulli-$\lambda$-EDA can perform an update, in a way common to many examples, we derive more concise characterizations, which are easy to verify. We demonstrate this by showing that our examples above are all unbiased.}, language = {en} } @misc{KoetzingKrejca2018a, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-hitting times under additive drift}, series = {Parallel Problem Solving from Nature - PPSN XV, PT II}, volume = {11102}, journal = {Parallel Problem Solving from Nature - PPSN XV, PT II}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-99259-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-99259-4_8}, pages = {92 -- 104}, year = {2018}, abstract = {For the last ten years, almost every theoretical result concerning the expected run time of a randomized search heuristic used drift theory, making it arguably the most important tool in this domain. Its success is due to its ease of use and its powerful result: drift theory allows the user to derive bounds on the expected first-hitting time of a random process by bounding expected local changes of the process - the drift. This is usually far easier than bounding the expected first-hitting time directly. Due to the widespread use of drift theory, it is of utmost importance to have the best drift theorems possible. We improve the fundamental additive, multiplicative, and variable drift theorems by stating them in a form as general as possible and providing examples of why the restrictions we keep are still necessary. Our additive drift theorem for upper bounds only requires the process to be nonnegative, that is, we remove unnecessary restrictions like a finite, discrete, or bounded search space.
As corollaries, the same is true for our upper bounds in the case of variable and multiplicative drift.}, language = {en} } @misc{KoetzingKrejca2018b, author = {K{\"o}tzing, Timo and Krejca, Martin Stefan}, title = {First-hitting times for finite state spaces}, series = {Parallel Problem Solving from Nature - PPSN XV, PT II}, volume = {11102}, journal = {Parallel Problem Solving from Nature - PPSN XV, PT II}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-99259-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-99259-4_7}, pages = {79 -- 91}, year = {2018}, abstract = {One of the most important aspects of a randomized algorithm is bounding its expected run time on various problems. Formally speaking, this means bounding the expected first-hitting time of a random process. The two arguably most popular tools to do so are the fitness level method and drift theory. The fitness level method considers arbitrary transition probabilities but only allows the process to move toward the goal. On the other hand, drift theory allows the process to move in any direction as long as it moves closer to the goal in expectation; however, this tendency has to be monotone and, thus, the transition probabilities cannot be arbitrary. We provide a result that combines the benefits of these two approaches: our result gives a lower and an upper bound for the expected first-hitting time of a random process over $\{0, \ldots, n\}$ that is allowed to move forward and backward by 1 and can use arbitrary transition probabilities. If the transition probabilities are known, our bounds coincide and yield the exact value of the expected first-hitting time. Further, we also state the stationary distribution as well as the mixing time of a special case of our scenario.}, language = {en} } @misc{KoetzingLagodzinskiLengleretal.2018, author = {K{\"o}tzing, Timo and Lagodzinski, Gregor J. A. and Lengler, Johannes and Melnichenko, Anna}, title = {Destructiveness of Lexicographic Parsimony Pressure and Alleviation by a Concatenation Crossover in Genetic Programming}, series = {Parallel Problem Solving from Nature - PPSN XV}, volume = {11102}, journal = {Parallel Problem Solving from Nature - PPSN XV}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-99259-4}, issn = {0302-9743}, doi = {10.1007/978-3-319-99259-4_4}, pages = {42 -- 54}, year = {2018}, abstract = {For theoretical analyses there are two specifics distinguishing GP from many other areas of evolutionary computation. First, the variable-size representations, in particular yielding possible bloat (i.e., the growth of individuals with redundant parts). Second, the role and realization of crossover, which is particularly central in GP due to the tree-based representation. Whereas some theoretical work on GP has studied the effects of bloat, crossover has had a surprisingly small share in this work. We analyze a simple crossover operator in combination with local search, where a preference for small solutions minimizes bloat (lexicographic parsimony pressure); the resulting algorithm is denoted Concatenation Crossover GP. For this purpose, three variants of the well-studied Majority test function with large plateaus are considered. We show that the Concatenation Crossover GP can efficiently optimize these test functions, while local search cannot be efficient for all three variants independent of employing bloat control.}, language = {en} } @article{KoetzingLagodzinskiLengleretal.2020, author = {K{\"o}tzing, Timo and Lagodzinski, Gregor J. A.
and Lengler, Johannes and Melnichenko, Anna}, title = {Destructiveness of lexicographic parsimony pressure and alleviation by a concatenation crossover in genetic programming}, series = {Theoretical computer science}, volume = {816}, journal = {Theoretical computer science}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0304-3975}, doi = {10.1016/j.tcs.2019.11.036}, pages = {96 -- 113}, year = {2020}, abstract = {For theoretical analyses there are two specifics distinguishing GP from many other areas of evolutionary computation: the variable-size representations, in particular yielding possible bloat (i.e., the growth of individuals with redundant parts); and the role and realization of crossover, which is particularly central in GP due to the tree-based representation. Whereas some theoretical work on GP has studied the effects of bloat, crossover has had a surprisingly small share in this work.
We analyze a simple crossover operator in combination with randomized local search, where a preference for small solutions minimizes bloat (lexicographic parsimony pressure); we denote the resulting algorithm Concatenation Crossover GP. We consider three variants of the well-studied MAJORITY test function, adding large plateaus in different ways to the fitness landscape and thus giving a test bed for analyzing the interplay of variation operators and bloat control mechanisms in a setting with local optima. We show that the Concatenation Crossover GP can efficiently optimize these test functions, while local search cannot be efficient for all three variants independent of employing bloat control.}, language = {en} } @article{DoerrKoetzing2022, author = {Doerr, Benjamin and K{\"o}tzing, Timo}, title = {Lower bounds from fitness levels made easy}, series = {Algorithmica}, journal = {Algorithmica}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-022-00952-w}, pages = {29}, year = {2022}, abstract = {One of the first and easiest-to-use techniques for proving run time bounds for evolutionary algorithms is the so-called method of fitness levels by Wegener. It uses a partition of the search space into a sequence of levels which are traversed by the algorithm in increasing order, possibly skipping levels. An easy, but often strong upper bound for the run time can then be derived by adding the reciprocals of the probabilities to leave the levels (or upper bounds for these). Unfortunately, a similarly effective method for proving lower bounds has not yet been established. The strongest such method, proposed by Sudholt (2013), requires a careful choice of the viscosity parameters $\gamma_{i,j}$, $0 \le i < j \le n$. In this paper we present two new variants of the method, one for upper and one for lower bounds. Besides the level-leaving probabilities, they only rely on the probabilities that levels are visited at all. We show that these can be computed or estimated without great difficulty and apply our method to reprove the following known results in an easy and natural way. (i) The precise run time of the (1+1) EA on LEADINGONES. (ii) A lower bound for the run time of the (1+1) EA on ONEMAX, tight apart from an $O(n)$ term. (iii) A lower bound for the run time of the (1+1) EA on long $k$-paths (which differs slightly from the previous result due to a small error in the latter). We also prove a tighter lower bound for the run time of the (1+1) EA on jump functions by showing that, regardless of the jump size, only with probability $O(2^{-n})$ can the algorithm avoid jumping over the valley of low fitness.}, language = {en} } @article{DoerrKoetzingLagodzinskietal.2020, author = {Doerr, Benjamin and K{\"o}tzing, Timo and Lagodzinski, Gregor J. A. and Lengler, Johannes}, title = {The impact of lexicographic parsimony pressure for ORDER/MAJORITY on the run time}, series = {Theoretical computer science : the journal of the EATCS}, volume = {816}, journal = {Theoretical computer science : the journal of the EATCS}, publisher = {Elsevier}, address = {Amsterdam [u.a.]}, issn = {0304-3975}, doi = {10.1016/j.tcs.2020.01.011}, pages = {144 -- 168}, year = {2020}, abstract = {While many optimization problems work with a fixed number of decision variables and thus a fixed-length representation of possible solutions, genetic programming (GP) works on variable-length representations.
A naturally occurring problem is that of bloat, that is, the unnecessary growth of solution lengths, which may slow down the optimization process. So far, mathematical runtime analysis could not deal well with bloat and required explicit assumptions limiting bloat. In this paper, we provide the first mathematical runtime analysis of a GP algorithm that does not require any assumptions on the bloat. Previous performance guarantees were only proven conditionally for runs in which no strong bloat occurs. Together with improved analyses for the case with bloat restrictions, our results show that such assumptions on the bloat are not necessary and that the algorithm is efficient without an explicit bloat control mechanism. More specifically, we analyze the performance of the (1+1) GP on the two benchmark functions ORDER and MAJORITY. When using lexicographic parsimony pressure as bloat control, we show a tight runtime estimate of $O(T_{\mathrm{init}} + n \log n)$ iterations both for ORDER and MAJORITY. For the case without bloat control, the bounds $O(T_{\mathrm{init}} \log T_{\mathrm{init}} + n (\log n)^3)$ and $\Omega(T_{\mathrm{init}} + n \log n)$ (and $\Omega(T_{\mathrm{init}} \log T_{\mathrm{init}})$ for $n = 1$) hold for MAJORITY.}, language = {en} } @article{DoerrKoetzing2020, author = {Doerr, Benjamin and K{\"o}tzing, Timo}, title = {Multiplicative Up-Drift}, series = {Algorithmica}, volume = {83}, journal = {Algorithmica}, number = {10}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-020-00775-7}, pages = {3017 -- 3058}, year = {2020}, abstract = {Drift analysis aims at translating the expected progress of an evolutionary algorithm (or, more generally, a random process) into a probabilistic guarantee on its run time (hitting time). So far, drift arguments have been successfully employed in the rigorous analysis of evolutionary algorithms, however, only for the situation that the progress is constant or becomes weaker when approaching the target. Motivated by questions like how fast fit individuals take over a population, we analyze random processes exhibiting a $(1+\delta)$-multiplicative growth in expectation. We prove a drift theorem translating this expected progress into a hitting time. This drift theorem gives a simple and insightful proof of the level-based theorem first proposed by Lehre (2011). Our version of this theorem has, for the first time, the best-possible near-linear dependence on $1/\delta$ (the previous results had an at least near-quadratic dependence), and it only requires a population size near-linear in $1/\delta$ (this was super-quadratic in previous results). These improvements immediately lead to stronger run time guarantees for a number of applications.
We also discuss the case of large $\delta$ and show stronger results for this setting.}, language = {en} } @article{ShiSchirneckFriedrichetal.2018, author = {Shi, Feng and Schirneck, Friedrich Martin and Friedrich, Tobias and K{\"o}tzing, Timo and Neumann, Frank}, title = {Reoptimization time analysis of evolutionary algorithms on linear functions under dynamic uniform constraints}, series = {Algorithmica : an international journal in computer science}, volume = {82}, journal = {Algorithmica : an international journal in computer science}, number = {10}, publisher = {Springer}, address = {New York}, issn = {0178-4617}, doi = {10.1007/s00453-020-00739-x}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-605295}, pages = {3117 -- 3123}, year = {2018}, abstract = {Rigorous runtime analysis is a major approach towards understanding evolutionary computing techniques, and in this area linear pseudo-Boolean objective functions play a central role. Having an additional linear constraint is then equivalent to the NP-hard Knapsack problem; certain classes thereof have been studied in recent works. In this article, we present a dynamic model of optimizing linear functions under uniform constraints. Starting from an optimal solution with respect to a given constraint bound, we investigate the runtimes that different evolutionary algorithms need to recompute an optimal solution when the constraint bound changes by a certain amount. The classical (1+1) EA and several population-based algorithms are designed for that purpose and are shown to recompute efficiently. Furthermore, a variant of the $(1+(\lambda,\lambda))$ GA for the dynamic optimization problem is studied, whose performance is better when the change of the constraint bound is small.}, language = {en} }