@article{5115,
  author    = {Gilroy, Bernard Michael and Nguyen, Birke Thuy Duong},
  journal   = {WiSt-Wirtschaftswissenschaftliches Studium},
  number    = {3},
  pages     = {134--140},
  publisher = {Verlag Franz Vahlen GmbH},
  title     = {{Ist Fairer Handel Wirklich Fair?}},
  volume    = {42},
  year      = {2013},
}
@inproceedings{513,
  abstract  = {This paper initiates the study of self-adjusting networks (or distributed data structures) whose topologies dynamically adapt to a communication pattern $\sigma$. We present a fully decentralized self-adjusting solution called SplayNet. A SplayNet is a distributed generalization of the classic splay tree concept. It ensures short paths (which can be found using local-greedy routing) between communication partners while minimizing topological rearrangements. We derive an upper bound for the amortized communication cost of a SplayNet based on empirical entropies of $\sigma$, and show that SplayNets have several interesting convergence properties. For instance, SplayNets features a provable online optimality under special requests scenarios. We also investigate the optimal static network and prove different lower bounds for the average communication cost based on graph cuts and on the empirical entropy of the communication pattern $\sigma$. From these lower bounds it follows, e.g., that SplayNets are optimal in scenarios where the requests follow a product distribution as well. Finally, this paper shows that in contrast to the Minimum Linear Arrangement problem which is generally NP-hard, the optimal static tree network can be computed in polynomial time for any guest graph, despite the exponentially large graph family. We complement our formal analysis with a small simulation study on a Facebook graph.},
  author    = {Avin, Chen and Häupler, Bernhard and Lotker, Zvi and Scheideler, Christian and Schmid, Stefan},
  booktitle = {Proceedings of the 27th IEEE International Parallel and Distributed Processing Symposium (IPDPS)},
  pages     = {395--406},
  title     = {{Locally Self-Adjusting Tree Networks}},
  doi       = {10.1109/IPDPS.2013.40},
  year      = {2013},
}
@techreport{5146,
  abstract    = {In this paper, we analyze a model in which two divisions negotiate over an intrafirm transfer price for an intermediate product. Formally, we consider bargaining problems under incomplete information, since the upstream division’s (seller's) costs and downstream division's (buyer's) revenues are supposed to be private information. Assuming two possible types for buyer and seller each, we first establish that the bargaining problem is regular, regardless whether incentive and/or efficiency constraints are imposed. This allows us to apply the generalized Nash bargaining solution to determine transfer payments and transfer probabilities. Furthermore, we derive general properties of this solution for the transfer pricing problem and compare the model developed here with the existing literature for negotiated transfer pricing under incomplete information. In particular, we focus on the models presented in Wagenhofer (1994).},
  author      = {Brangewitz, Sonja and Haake, Claus-Jochen},
  institution = {Paderborn University},
  keywords    = {Transfer Pricing, Negotiation, Generalized Nash Bargaining Solution, Incomplete Information},
  number      = {64},
  title       = {{Cooperative Transfer Price Negotiations under Incomplete Information}},
  type        = {CIE Working Paper},
  year        = {2013},
}
@book{5172,
  author    = {Sievers, Sönke},
  isbn      = {978-3-86582-925-2},
  keywords  = {Unternehmensbewertung, Unternehmenswachstum, Return on Investment Unternehmensbewertung, Investition, Steuervergünstigung},
  publisher = {Verlag-Haus Monsenstein und Vannerdat},
  title     = {{Company Valuation and Growth: Theory, Empirical Evidence and Practical Implementation Issues}},
  year      = {2013},
}
@inproceedings{498,
  abstract  = {Proof-carrying code approaches aim at safe execution of untrusted code by having the code producer attach a safety proof to the code which the code consumer only has to validate. Depending on the type of safety property, proofs can however become quite large and their validation - though faster than their construction - still time consuming. In this paper we introduce a new concept for safe execution of untrusted code. It keeps the idea of putting the time consuming part of proving on the side of the code producer, however, attaches no proofs to code anymore but instead uses the proof to transform the program into an equivalent but more efficiently verifiable program. Code consumers thus still do proving themselves, however, on a computationally inexpensive level only. Experimental results show that the proof effort can be reduced by several orders of magnitude, both with respect to time and space.},
  author    = {Wonisch, Daniel and Schremmer, Alexander and Wehrheim, Heike},
  booktitle = {Proceedings of the 25th International Conference on Computer Aided Verification (CAV)},
  pages     = {912--927},
  title     = {{Programs from Proofs – A PCC Alternative}},
  doi       = {10.1007/978-3-642-39799-8_65},
  year      = {2013},
}
@inproceedings{4991,
  author    = {Oertel, Simon and Thommes, Kirsten and Walgenbach, Peter},
  booktitle = {Academy of Management Proceedings},
  number    = {1},
  pages     = {11642},
  title     = {{Born in the GDR: Imprinting, Structural Inertia and the Survival Chances of Organizations}},
  year      = {2013},
}
@phdthesis{501,
  abstract  = {Handling run-time dynamics on embedded system-on-chip architectures has become more challenging over the years. On the one hand, the impact of workload and physical dynamics on the system behavior has dramatically increased. On the other hand, embedded architectures have become more complex as they have evolved from single-processor systems over multi-processor systems to hybrid multi-core platforms. Static design-time techniques no longer provide suitable solutions to deal with the run-time dynamics of today's embedded systems. In this thesis, we present fundamental work in the new area of run-time management on hybrid multi-core platforms. We propose a novel architecture, a self-adaptive hybrid multi-core system, that combines heterogeneous processors, reconfigurable hardware cores, and monitoring cores on a single chip. Using self-adaptation on thread-level, our hybrid multi-core systems can effectively perform performance and thermal management autonomously at run-time.},
  author    = {Happe, Markus},
  isbn      = {978-3-8325-3425-7},
  pages     = {220},
  publisher = {Logos Verlag Berlin GmbH},
  title     = {{Performance and thermal management on self-adaptive hybrid multi-cores}},
  year      = {2013},
}
@article{4600,
  author    = {Guo, Zhichao and Feng, Yuanhua},
  issn      = {0264-9993},
  journal   = {Economic Modelling},
  pages     = {474--483},
  publisher = {Elsevier BV},
  title     = {{Modeling of the impact of the financial crisis and China's accession to WTO on China's exports to Germany}},
  doi       = {10.1016/j.econmod.2012.12.015},
  volume    = {31},
  year      = {2013},
}
@inproceedings{4698,
  author    = {Gregor, Shirley and Müller, Oliver and Seidel, Stefan},
  booktitle = {European Conference on Information Systems},
  keywords  = {Abstraction, Affordances, Design Science Research, Design Theory, Information Systems Development, Reflection, Theorizing},
  title     = {{Reflection, abstraction and theorizing in design and development research}},
  year      = {2013},
}
@techreport{474,
  abstract    = {Suppose some individuals are allowed to engage in different groups at the same time and they generate a certain welfare by cooperation. Finding appropriate ways for distributing this welfare is a non-trivial issue. The purpose of this work is to analyze two-stage allocation procedures where first each group receives a share of the welfare which is then, subsequently, distributed among the corresponding members. To study these procedures in a structured way, cooperative games and network games are combined in a general framework by using mathematical hypergraphs. Moreover, several convincing requirements on allocation procedures are discussed and formalized. Thereby it will be shown, for example, that the Position Value and iteratively applying the Myerson Value can be characterized by similar axiomatizations.},
  author      = {Röhl, Nils},
  institution = {Universität Paderborn},
  title       = {{Two-Stage Allocation Procedures}},
  year        = {2013},
}
@inproceedings{481,
  abstract  = {Cloud computing offers high availability, dynamic scalability, and elasticity requiring only very little administration. However, this service comes with financial costs. Peer-to-peer systems, in contrast, operate at very low costs but cannot match the quality of service of the cloud. This paper focuses on the case study of Wikipedia and presents an approach to reduce the operational costs of hosting similar websites in the cloud by using a practical peer-to-peer approach. The visitors of the site are joining a Chord overlay, which acts as first cache for article lookups. Simulation results show, that up to 72% of the article lookups in Wikipedia could be answered by other visitors instead of using the cloud.},
  author    = {Graffi, Kalman and Bremer, Lars},
  booktitle = {Proceedings of the International Conference on Communications (ICC'13)},
  pages     = {3444--3449},
  title     = {{Symbiotic Coupling of P2P and Cloud Systems: The Wikipedia Case}},
  doi       = {10.1109/ICC.2013.6655082},
  year      = {2013},
}
@article{4852,
  author  = {Haas, Alexander and Eggert, Andreas and Terho, Harri and Ulaga, Wolfgang},
  journal = {Marketing Review St. Gallen},
  number  = {4},
  pages   = {64--73},
  title   = {{Erfolgsfaktor Value-Based Selling—Verkaufen, wenn Kundenorientierung nicht zum Erfolg führt}},
  year    = {2013},
}
@misc{486,
  author    = {Otte, Oliver},
  publisher = {Universität Paderborn},
  title     = {{Seitenkanalresistenz paarungsbasierter Kryptographie}},
  year      = {2013},
}
@techreport{4915,
  author      = {Mir Djawadi, Behnud and Fahr, Rene},
  institution = {Universität Paderborn},
  title       = {{The impact of risk perception and risk attitudes on corrupt behavior: Evidence from a petty corruption experiment}},
  year        = {2013},
}
@misc{493,
  author    = {Terentjew, Artjom},
  publisher = {Universität Paderborn},
  title     = {{Reputationssysteme und Gerichtsverfahren als Werkzeuge zur Sicherstellung von Qualitätsstandards in Transaktionen}},
  year      = {2013},
}
@inproceedings{563,
  abstract  = {Dominating set based virtual backbones are used for routing in wireless ad-hoc networks. Such backbones receive and transmit messages from/to every node in the network. Existing distributed algorithms only consider undirected graphs, which model symmetric networks with uniform transmission ranges. We are particularly interested in the well-established disk graphs, which model asymmetric networks with non-uniform transmission ranges. The corresponding graph theoretic problem seeks a strongly connected dominating-absorbent set of minimum cardinality in a digraph. A subset of nodes in a digraph is a strongly connected dominating-absorbent set if the subgraph induced by these nodes is strongly connected and each node in the graph is either in the set or has both an in-neighbor and an out-neighbor in it. We introduce the first distributed algorithm for this problem in disk graphs. The algorithm gives an O(k^4)-approximation ratio and has a runtime bound of O(Diam) where Diam is the diameter of the graph and k denotes the transmission ratio r_{max}/r_{min} with r_{max} and r_{min} being the maximum and minimum transmission range, respectively. Moreover, we apply our algorithm on the subgraph of disk graphs consisting of only bidirectional edges. Our algorithm gives an O(ln k)-approximation and a runtime bound of O(k^8 log^∗ n), which, for bounded k, is an optimal approximation for the problem, following Lenzen and Wattenhofer’s Ω(log^∗ n) runtime lower bound for distributed constant approximation in disk graphs.},
  author    = {Markarian, Christine and Meyer auf der Heide, Friedhelm and Schubert, Michael},
  booktitle = {Proceedings of the 9th International Symposium on Algorithms and Experiments for Sensor Systems, Wireless Networks and Distributed Robotics (ALGOSENSORS)},
  pages     = {217--227},
  title     = {{A Distributed Approximation Algorithm for Strongly Connected Dominating-Absorbent Sets in Asymmetric Wireless Ad-Hoc Networks}},
  doi       = {10.1007/978-3-642-45346-5_16},
  year      = {2013},
}
@inproceedings{6271,
  author = {Weber, H. and Wehner, M. and Kabst, Rüdiger},
  title  = {{Institutional Determinants of Work-Family Practices: An International Comparative Examination}},
  year   = {2013},
}
@inbook{6276,
  author    = {Klompmaker, Florian and Paelke, Volker and Fischer, Holger Gerhard},
  booktitle = {Distributed, Ambient, and Pervasive Interactions},
  isbn      = {9783642393501},
  issn      = {0302-9743},
  location  = {Las Vegas, USA},
  pages     = {32--41},
  publisher = {Springer Berlin Heidelberg},
  series    = {Lecture Notes in Computer Science},
  title     = {{A Taxonomy-Based Approach towards NUI Interaction Design}},
  doi       = {10.1007/978-3-642-39351-8_4},
  volume    = {8028},
  year      = {2013},
}
@misc{537,
  author    = {Heindorf, Stefan},
  publisher = {Universität Paderborn},
  title     = {{Dispersion of Multi-Robot Teams}},
  year      = {2013},
}
@inproceedings{544,
  abstract  = {Comparative evaluations of peer-to-peer protocols through simulations are a viable approach to judge the performance and costs of the individual protocols in large-scale networks. In order to support this work, we enhanced the peer-to-peer systems simulator PeerfactSim.KOM with a fine-grained analyzer concept, with exhaustive automated measurements and gnuplot generators as well as a coordination control to evaluate a set of experiment setups in parallel. Thus, by configuring all experiments and protocols only once and starting the simulator, all desired measurements are performed, analyzed, evaluated and combined, resulting in a holistic environment for the comparative evaluation of peer-to-peer systems.},
  author    = {Feldotto, Matthias and Graffi, Kalman},
  booktitle = {Proceedings of the International Conference on High Performance Computing and Simulation (HPCS'13)},
  pages     = {99--106},
  title     = {{Comparative Evaluation of Peer-to-Peer Systems Using PeerfactSim.KOM}},
  doi       = {10.1109/HPCSim.2013.6641399},
  year      = {2013},
}