@book{KubanRottaNolteetal.2023,
  author    = {Kuban, Robert and Rotta, Randolf and Nolte, J{\"o}rg and Chromik, Jonas and Beilharz, Jossekin Jakob and Pirl, Lukas and Friedrich, Tobias and Lenzner, Pascal and Weyand, Christopher and Juiz, Carlos and Bermejo, Belen and Sauer, Joao and Coelho, Leandro dos Santos and Najafi, Pejman and P{\"u}nter, Wenzel and Cheng, Feng and Meinel, Christoph and Sidorova, Julia and Lundberg, Lars and Vogel, Thomas and Tran, Chinh and Moser, Irene and Grunske, Lars and Elsaid, Mohamed Esameldin Mohamed and Abbas, Hazem M. and Rula, Anisa and Sejdiu, Gezim and Maurino, Andrea and Schmidt, Christopher and H{\"u}gle, Johannes and Uflacker, Matthias and Nozza, Debora and Messina, Enza and Hoorn, Andr{\'e} van and Frank, Markus and Schulz, Henning and Alhosseini Almodarresi Yasin, Seyed Ali and Nowicki, Marek and Muite, Benson K. and Boysan, Mehmet Can and Bianchi, Federico and Cremaschi, Marco and Moussa, Rim and Abdel-Karim, Benjamin M. and Pfeuffer, Nicolas and Hinz, Oliver and Plauth, Max and Polze, Andreas and Huo, Da and Melo, Gerard de and Mendes Soares, F{\'a}bio and Oliveira, Roberto C{\'e}lio Lim{\~a}o de and Benson, Lawrence and Paul, Fabian and Werling, Christian and Windheuser, Fabian and Stojanovic, Dragan and Djordjevic, Igor and Stojanovic, Natalija and Stojnev Ilic, Aleksandra and Weidmann, Vera and Lowitzki, Leon and Wagner, Markus and Ifa, Abdessatar Ben and Arlos, Patrik and Megia, Ana and Vendrell, Joan and Pfitzner, Bjarne and Redondo, Alberto and R{\'i}os Insua, David and Albert, Justin Amadeus and Zhou, Lin and Arnrich, Bert and Szab{\'o}, Ildik{\'o} and Fodor, Szabina and Ternai, Katalin and Bhowmik, Rajarshi and Campero Durand, Gabriel and Shevchenko, Pavlo and Malysheva, Milena and Prymak, Ivan and Saake, Gunter},
  title     = {HPI Future SOC Lab - Proceedings 2019},
  number    = {158},
  editor    = {Meinel, Christoph and Polze, Andreas and Beins, Karsten and Strotmann, Rolf and Seibold, Ulrich and R{\"o}dszus, Kurt and M{\"u}ller, J{\"u}rgen},
  publisher = {Universit{\"a}tsverlag Potsdam},
  address   = {Potsdam},
  isbn      = {978-3-86956-564-4},
  issn      = {1613-5652},
  doi       = {10.25932/publishup-59791},
  url       = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus4-597915},
  pages     = {xi, 301},
  year      = {2023},
  abstract  = {The ``HPI Future SOC Lab'' is a cooperation between the Hasso Plattner Institute (HPI) and industry partners. Its mission is to enable and promote exchange and interaction between the research community and the industry partners. The HPI Future SOC Lab provides researchers with free-of-charge access to a complete infrastructure of state-of-the-art hardware and software. This infrastructure includes components which might be too expensive for an ordinary research environment, such as servers with up to 64 cores and 2 TB of main memory. The offerings address researchers particularly from, but not limited to, the areas of computer science and business information systems. The main areas of research include cloud computing, parallelization, and in-memory technologies. This technical report presents the results of the research projects executed in 2019.
Selected projects presented their results at the Future SOC Lab Day events on April 9 and November 12, 2019.},
  language  = {en}
}

@article{LongdeMeloHeetal.2020,
  author    = {Long, Xiang and Melo, Gerard de and He, Dongliang and Li, Fu and Chi, Zhizhen and Wen, Shilei and Gan, Chuang},
  title     = {Purely attention based local feature integration for video classification},
  series    = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume    = {44},
  journal   = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  number    = {4},
  publisher = {Institute of Electrical and Electronics Engineers},
  address   = {Los Alamitos},
  issn      = {0162-8828},
  doi       = {10.1109/TPAMI.2020.3029554},
  pages     = {2140--2154},
  year      = {2020},
  abstract  = {Recently, substantial research effort has focused on how to apply CNNs or RNNs to better capture temporal patterns in videos, so as to improve the accuracy of video classification. In this paper, we investigate the potential of purely attention-based local feature integration. Accounting for the characteristics of such features in video classification, we first propose Basic Attention Clusters (BAC), which concatenates the outputs of multiple attention units applied in parallel, and introduce a shifting operation to capture more diverse signals. Experiments show that BAC can achieve excellent results on multiple datasets. However, BAC treats all feature channels as an indivisible whole, which is suboptimal for achieving finer-grained local feature integration over the channel dimension. Additionally, it treats the entire local feature sequence as an unordered set, thus ignoring the sequential relationships. To improve over BAC, we further propose the channel pyramid attention schema, which splits features into sub-features at multiple scales for coarse-to-fine sub-feature interaction modeling, and the temporal pyramid attention schema, which divides the feature sequences into ordered sub-sequences of multiple lengths to account for the sequential order. Our final model, pyramid{\texttimes}pyramid attention clusters (PPAC), combines both channel pyramid attention and temporal pyramid attention to focus on the most important sub-features, while also preserving the temporal information of the video. We demonstrate the effectiveness of PPAC on seven real-world video classification datasets. Our model achieves competitive results across all of them, showing that the proposed framework can consistently outperform existing local feature integration methods across a range of scenarios.},
  language  = {en}
}

@article{KulahciogluMelo2020,
  author    = {Kulahcioglu, Tugba and Melo, Gerard de},
  title     = {Affect-aware word clouds},
  series    = {ACM Transactions on Interactive Intelligent Systems},
  volume    = {10},
  journal   = {ACM Transactions on Interactive Intelligent Systems},
  number    = {4},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  issn      = {2160-6455},
  doi       = {10.1145/3370928},
  pages     = {25},
  year      = {2020},
  abstract  = {Word clouds are widely used for non-analytic purposes, such as introducing a topic to students or creating a gift with personally meaningful text. Surveys show that users prefer tools that yield word clouds with a stronger emotional impact. Fonts and color palettes are powerful typographical signals that may determine this impact. Typically, these signals are assigned randomly or are expected to be chosen by the users.
We present an affect-aware font and color palette selection methodology that aims to facilitate more informed choices. We infer associations of fonts with a set of eight affects and evaluate the resulting data in a series of user studies, both on individual words and in word clouds. Relying on a recent study that procured affective color palettes, we carry out a similar user study to understand the impact of color choices on word clouds. Our findings suggest that both fonts and color palettes are powerful tools contributing to the affects evoked by a word cloud. The experiments further confirm that the novel datasets we propose are successful in enabling this. We also find that, for the majority of the affects, both signals need to be congruent to create a stronger impact. Based on these data, we implement a prototype that allows users to specify a desired affect and recommends congruent fonts and color palettes for the word cloud.},
  language  = {en}
}

@article{PuriVardeMelo2023,
  author    = {Puri, Manish and Varde, Aparna S. and Melo, Gerard de},
  title     = {Commonsense based text mining on urban policy},
  series    = {Language Resources and Evaluation},
  volume    = {57},
  journal   = {Language Resources and Evaluation},
  publisher = {Springer},
  address   = {Dordrecht},
  issn      = {1574-020X},
  doi       = {10.1007/s10579-022-09584-6},
  pages     = {733--763},
  year      = {2023},
  abstract  = {Local laws on urban policy, i.e., ordinances, directly affect our daily lives in various ways (health, business, etc.), yet in practice they remain opaque and complex to many citizens. This article focuses on an approach to make urban policy more accessible and comprehensible to the general public and to government officials, while also addressing pertinent social media postings. Due to the intricacies of natural language, ranging from complex legalese in ordinances to informal lingo in tweets, it is practical to harness human judgment here. To this end, we mine ordinances and tweets via reasoning based on commonsense knowledge so as to better account for pragmatics and semantics in the text. Ours is pioneering work in ordinance mining, and thus no prior labeled training data is available for learning. This gap is filled by commonsense knowledge, a prudent choice in situations involving a lack of adequate training data. Ordinance mining can be beneficial to the public in understanding policies and to officials in assessing policy effectiveness based on public reactions. This work contributes to smart governance, leveraging transparency in governing processes via public involvement. We focus significantly on ordinances contributing to smart cities; hence, an important goal is to assess how far an urban region is heading towards becoming a smart city, based on how its policies map to smart city characteristics and on the corresponding public satisfaction.},
  language  = {en}
}