corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-401 | 0705.2876 | An online algorithm for generating fractal hash chains applied to digital chains of custody | <|reference_start|>An online algorithm for generating fractal hash chains applied to digital chains of custody: This paper gives an online algorithm for generating Jakobsson's fractal hash chains. Our new algorithm compliments Jakobsson's fractal hash chain algorithm for preimage traversal since his algorithm assumes the entire hash chain is precomputed and a particular list of Ceiling(log n) hash elements or pebbles are saved. Our online algorithm for hash chain traversal incrementally generates a hash chain of n hash elements without knowledge of n before it starts. For any n, our algorithm stores only the Ceiling(log n) pebbles which are precisely the inputs for Jakobsson's amortized hash chain preimage traversal algorithm. This compact representation is useful to generate, traverse, and store a number of large digital hash chains on a small and constrained device. We also give an application using both Jakobsson's and our new algorithm applied to digital chains of custody for validating dynamically changing forensics data.<|reference_end|> | arxiv | @article{bradford2007an,
title={An online algorithm for generating fractal hash chains applied to
digital chains of custody},
author={Phillip G. Bradford and Daniel A. Ray},
journal={arXiv preprint arXiv:0705.2876},
year={2007},
archivePrefix={arXiv},
eprint={0705.2876},
primaryClass={cs.CR cs.DS}
} | bradford2007an |
arxiv-402 | 0705.3013 | A stochastic non-cooperative game for energy efficiency in wireless data networks | <|reference_start|>A stochastic non-cooperative game for energy efficiency in wireless data networks: In this paper the issue of energy efficiency in CDMA wireless data networks is addressed through a game theoretic approach. Building on a recent paper by the first two authors, wherein a non-cooperative game for spreading-code optimization, power control, and receiver design has been proposed to maximize the ratio of data throughput to transmit power for each active user, a stochastic algorithm is here described to perform adaptive implementation of the said non-cooperative game. The proposed solution is based on a combination of RLS-type and LMS-type adaptations, and makes use of readily available measurements. Simulation results show that its performance approaches with satisfactory accuracy that of the non-adaptive game, which requires a much larger amount of prior information.<|reference_end|> | arxiv | @article{buzzi2007a,
title={A stochastic non-cooperative game for energy efficiency in wireless data
networks},
author={Stefano Buzzi and H. Vincent Poor and Daniela Saturnino},
journal={arXiv preprint arXiv:0705.3013},
year={2007},
archivePrefix={arXiv},
eprint={0705.3013},
primaryClass={cs.IT cs.GT math.IT}
} | buzzi2007a |
arxiv-403 | 0705.3015 | An Extensible Timing Infrastructure for Adaptive Large-scale Applications | <|reference_start|>An Extensible Timing Infrastructure for Adaptive Large-scale Applications: Real-time access to accurate and reliable timing information is necessary to profile scientific applications, and crucial as simulations become increasingly complex, adaptive, and large-scale. The Cactus Framework provides flexible and extensible capabilities for timing information through a well designed infrastructure and timing API. Applications built with Cactus automatically gain access to built-in timers, such as gettimeofday and getrusage, system-specific hardware clocks, and high-level interfaces such as PAPI. We describe the Cactus timer interface, its motivation, and its implementation. We then demonstrate how this timing information can be used by an example scientific application to profile itself, and to dynamically adapt itself to a changing environment at run time.<|reference_end|> | arxiv | @article{stark2007an,
title={An Extensible Timing Infrastructure for Adaptive Large-scale
Applications},
author={Dylan Stark and Gabrielle Allen and Tom Goodale and Thomas Radke and
Erik Schnetter},
journal={In Roman Wyrzykowski et al., editors, Parallel Processing and
Applied Mathematics (PPAM), 2007, Gdansk, Poland, volume 4967 of Lecture
Notes in Computer Science (LNCS), pages 1170-1179. Springer, 2007.},
year={2007},
archivePrefix={arXiv},
eprint={0705.3015},
primaryClass={cs.PF cs.DC}
} | stark2007an |
arxiv-404 | 0705.3025 | Spectral Efficiency of Spectrum Pooling Systems | <|reference_start|>Spectral Efficiency of Spectrum Pooling Systems: In this contribution, we investigate the idea of using cognitive radio to reuse locally unused spectrum to increase the total system capacity. We consider a multiband/wideband system in which the primary and cognitive users wish to communicate to different receivers, subject to mutual interference and assume that each user knows only his channel and the unused spectrum through adequate sensing. The basic idea under the proposed scheme is based on the notion of spectrum pooling. The idea is quite simple: a cognitive radio will listen to the channel and, if sensed idle, will transmit during the voids. It turns out that, although its simplicity, the proposed scheme showed very interesting features with respect to the spectral efficiency and the maximum number of possible pairwise cognitive communications. We impose the constraint that users successively transmit over available bands through selfish water filling. For the first time, our study has quantified the asymptotic (with respect to the band) achievable gain of using spectrum pooling in terms of spectral efficiency compared to classical radio systems. We then derive the total spectral efficiency as well as the maximum number of possible pairwise communications of such a spectrum pooling system.<|reference_end|> | arxiv | @article{haddad2007spectral,
title={Spectral Efficiency of Spectrum Pooling Systems},
author={Majed Haddad and Aawatif Menouni Hayar and Merouane Debbah},
journal={IET Special Issue on Cognitive Spectrum Access, Vol. 2, No. 6, pp.
733-741, July 2008},
year={2007},
archivePrefix={arXiv},
eprint={0705.3025},
primaryClass={cs.IT cs.NI math.IT}
} | haddad2007spectral |
arxiv-405 | 0705.3050 | A competitive multi-agent model of interbank payment systems | <|reference_start|>A competitive multi-agent model of interbank payment systems: We develop a dynamic multi-agent model of an interbank payment system where banks choose their level of available funds on the basis of private payoff maximisation. The model consists of the repetition of a simultaneous move stage game with incomplete information, incomplete monitoring, and stochastic payoffs. Adaptation takes place with bayesian updating, with banks maximizing immediate payoffs. We carry out numerical simulations to solve the model and investigate two special scenarios: an operational incident and exogenous throughput guidelines for payment submission. We find that the demand for intraday credit is an S-shaped function of the cost ratio between intraday credit costs and the costs associated with delaying payments. We also find that the demand for liquidity is increased both under operational incidents and in the presence of effective throughput guidelines.<|reference_end|> | arxiv | @article{galbiati2007a,
title={A competitive multi-agent model of interbank payment systems},
author={Marco Galbiati and Kimmo Soramaki},
journal={arXiv preprint arXiv:0705.3050},
year={2007},
archivePrefix={arXiv},
eprint={0705.3050},
primaryClass={cs.MA}
} | galbiati2007a |
arxiv-406 | 0705.3058 | On the Shannon capacity and queueing stability of random access multicast | <|reference_start|>On the Shannon capacity and queueing stability of random access multicast: We study and compare the Shannon capacity region and the stable throughput region for a random access system in which source nodes multicast their messages to multiple destination nodes. Under an erasure channel model which accounts for interference and allows for multipacket reception, we first characterize the Shannon capacity region. We then consider a queueing-theoretic formulation and characterize the stable throughput region for two different transmission policies: a retransmission policy and random linear coding. Our results indicate that for large blocklengths, the random linear coding policy provides a higher stable throughput than the retransmission policy. Furthermore, our results provide an example of a transmission policy for which the Shannon capacity region strictly outer bounds the stable throughput region, which contradicts an unproven conjecture that the Shannon capacity and stable throughput coincide for random access systems.<|reference_end|> | arxiv | @article{shrader2007on,
title={On the Shannon capacity and queueing stability of random access
multicast},
author={Brooke Shrader and Anthony Ephremides},
journal={arXiv preprint arXiv:0705.3058},
year={2007},
archivePrefix={arXiv},
eprint={0705.3058},
primaryClass={cs.IT math.IT}
} | shrader2007on |
arxiv-407 | 0705.3061 | Measuring and Localing Homology Classes | <|reference_start|>Measuring and Localing Homology Classes: We develop a method for measuring and localizing homology classes. This involves two problems. First, we define relevant notions of size for both a homology class and a homology group basis, using ideas from relative homology. Second, we propose an algorithm to compute the optimal homology basis, using techniques from persistent homology and finite field algebra. Classes of the computed optimal basis are localized with cycles conveying their sizes. The algorithm runs in $O(\beta^4 n^3 \log^2 n)$ time, where $n$ is the size of the simplicial complex and $\beta$ is the Betti number of the homology group.<|reference_end|> | arxiv | @article{freedman2007measuring,
title={Measuring and Localing Homology Classes},
author={Daniel Freedman and Chao Chen},
journal={arXiv preprint arXiv:0705.3061},
year={2007},
archivePrefix={arXiv},
eprint={0705.3061},
primaryClass={cs.CG math.AT}
} | freedman2007measuring |
arxiv-408 | 0705.3099 | Distortion Minimization in Gaussian Layered Broadcast Coding with Successive Refinement | <|reference_start|>Distortion Minimization in Gaussian Layered Broadcast Coding with Successive Refinement: A transmitter without channel state information (CSI) wishes to send a delay-limited Gaussian source over a slowly fading channel. The source is coded in superimposed layers, with each layer successively refining the description in the previous one. The receiver decodes the layers that are supported by the channel realization and reconstructs the source up to a distortion. The expected distortion is minimized by optimally allocating the transmit power among the source layers. For two source layers, the allocation is optimal when power is first assigned to the higher layer up to a power ceiling that depends only on the channel fading distribution; all remaining power, if any, is allocated to the lower layer. For convex distortion cost functions with convex constraints, the minimization is formulated as a convex optimization problem. In the limit of a continuum of infinite layers, the minimum expected distortion is given by the solution to a set of linear differential equations in terms of the density of the fading distribution. As the bandwidth ratio b (channel uses per source symbol) tends to zero, the power distribution that minimizes expected distortion converges to the one that maximizes expected capacity. While expected distortion can be improved by acquiring CSI at the transmitter (CSIT) or by increasing diversity from the realization of independent fading paths, at high SNR the performance benefit from diversity exceeds that from CSIT, especially when b is large.<|reference_end|> | arxiv | @article{ng2007distortion,
title={Distortion Minimization in Gaussian Layered Broadcast Coding with
Successive Refinement},
author={Chris T. K. Ng and Deniz Gunduz and Andrea Goldsmith and Elza Erkip},
journal={IEEE Trans. Inf. Theory, vol. 55, no. 11, pp. 5074-5086, Nov. 2009},
year={2007},
doi={10.1109/TIT.2009.2030455},
archivePrefix={arXiv},
eprint={0705.3099},
primaryClass={cs.IT math.IT}
} | ng2007distortion |
arxiv-409 | 0705.3227 | Computability of simple games: A characterization and application to the core | <|reference_start|>Computability of simple games: A characterization and application to the core: The class of algorithmically computable simple games (i) includes the class of games that have finite carriers and (ii) is included in the class of games that have finite winning coalitions. This paper characterizes computable games, strengthens the earlier result that computable games violate anonymity, and gives examples showing that the above inclusions are strict. It also extends Nakamura's theorem about the nonemptyness of the core and shows that computable games have a finite Nakamura number, implying that the number of alternatives that the players can deal with rationally is restricted.<|reference_end|> | arxiv | @article{kumabe2007computability,
title={Computability of simple games: A characterization and application to the
core},
author={Masahiro Kumabe and H. Reiju Mihara},
journal={Journal of Mathematical Economics, Volume 44, Issues 3-4, February
2008, Pages 348-366},
year={2007},
doi={10.1016/j.jmateco.2007.05.012},
archivePrefix={arXiv},
eprint={0705.3227},
primaryClass={cs.GT cs.CC cs.LO math.LO}
} | kumabe2007computability |
arxiv-410 | 0705.3261 | Recovering Multiplexing Loss Through Successive Relaying Using Repetition Coding | <|reference_start|>Recovering Multiplexing Loss Through Successive Relaying Using Repetition Coding: In this paper, a transmission protocol is studied for a two relay wireless network in which simple repetition coding is applied at the relays. Information-theoretic achievable rates for this transmission scheme are given, and a space-time V-BLAST signalling and detection method that can approach them is developed. It is shown through the diversity multiplexing tradeoff analysis that this transmission scheme can recover the multiplexing loss of the half-duplex relay network, while retaining some diversity gain. This scheme is also compared with conventional transmission protocols that exploit only the diversity of the network at the cost of a multiplexing loss. It is shown that the new transmission protocol offers significant performance advantages over conventional protocols, especially when the interference between the two relays is sufficiently strong.<|reference_end|> | arxiv | @article{fan2007recovering,
title={Recovering Multiplexing Loss Through Successive Relaying Using
Repetition Coding},
author={Yijia Fan and Chao Wang and John Thompson and H. Vincent Poor},
journal={arXiv preprint arXiv:0705.3261},
year={2007},
doi={10.1109/TWC.2007.060339},
archivePrefix={arXiv},
eprint={0705.3261},
primaryClass={cs.IT math.IT}
} | fan2007recovering |
arxiv-411 | 0705.3316 | Acyclicity of Preferences, Nash Equilibria, and Subgame Perfect Equilibria: a Formal and Constructive Equivalence | <|reference_start|>Acyclicity of Preferences, Nash Equilibria, and Subgame Perfect Equilibria: a Formal and Constructive Equivalence: In 1953, Kuhn showed that every sequential game has a Nash equilibrium by showing that a procedure, named ``backward induction'' in game theory, yields a Nash equilibrium. It actually yields Nash equilibria that define a proper subclass of Nash equilibria. In 1965, Selten named this proper subclass subgame perfect equilibria. In game theory, payoffs are rewards usually granted at the end of a game. Although traditional game theory mainly focuses on real-valued payoffs that are implicitly ordered by the usual total order over the reals, works of Simon or Blackwell already involved partially ordered payoffs. This paper generalises the notion of sequential game by replacing real-valued payoff functions with abstract atomic objects, called outcomes, and by replacing the usual total order over the reals with arbitrary binary relations over outcomes, called preferences. This introduces a general abstract formalism where Nash equilibrium, subgame perfect equilibrium, and ``backward induction'' can still be defined. This paper proves that the following three propositions are equivalent: 1) Preferences over the outcomes are acyclic. 2) Every sequential game has a Nash equilibrium. 3) Every sequential game has a subgame perfect equilibrium. The result is fully computer-certified using Coq. Beside the additional guarantee of correctness, the activity of formalisation using Coq also helps clearly identify the useful definitions and the main articulations of the proof.<|reference_end|> | arxiv | @article{roux2007acyclicity,
title={Acyclicity of Preferences, Nash Equilibria, and Subgame Perfect
Equilibria: a Formal and Constructive Equivalence},
author={St{\'e}phane Le Roux (LIP)},
journal={arXiv preprint arXiv:0705.3316},
year={2007},
archivePrefix={arXiv},
eprint={0705.3316},
primaryClass={cs.DM cs.GT cs.LO}
} | roux2007acyclicity |
arxiv-412 | 0705.3343 | Optimal Separable Algorithms to Compute the Reverse Euclidean Distance Transformation and Discrete Medial Axis in Arbitrary Dimension | <|reference_start|>Optimal Separable Algorithms to Compute the Reverse Euclidean Distance Transformation and Discrete Medial Axis in Arbitrary Dimension: In binary images, the distance transformation (DT) and the geometrical skeleton extraction are classic tools for shape analysis. In this paper, we present time optimal algorithms to solve the reverse Euclidean distance transformation and the reversible medial axis extraction problems for $d$-dimensional images. We also present a $d$-dimensional medial axis filtering process that allows us to control the quality of the reconstructed shape.<|reference_end|> | arxiv | @article{coeurjolly2007optimal,
title={Optimal Separable Algorithms to Compute the Reverse Euclidean Distance
Transformation and Discrete Medial Axis in Arbitrary Dimension},
author={David Coeurjolly (LIRIS) and Annick Montanvert (GIPSA-lab)},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence 29,
3 (01/03/2007) 437-448},
year={2007},
doi={10.1109/TPAMI.2007.54},
archivePrefix={arXiv},
eprint={0705.3343},
primaryClass={cs.CG}
} | coeurjolly2007optimal |
arxiv-413 | 0705.3344 | Multiuser detection in a dynamic environment Part I: User identification and data detection | <|reference_start|>Multiuser detection in a dynamic environment Part I: User identification and data detection: In random-access communication systems, the number of active users varies with time, and has considerable bearing on receiver's performance. Thus, techniques aimed at identifying not only the information transmitted, but also that number, play a central role in those systems. An example of application of these techniques can be found in multiuser detection (MUD). In typical MUD analyses, receivers are based on the assumption that the number of active users is constant and known at the receiver, and coincides with the maximum number of users entitled to access the system. This assumption is often overly pessimistic, since many users might be inactive at any given time, and detection under the assumption of a number of users larger than the real one may impair performance. The main goal of this paper is to introduce a general approach to the problem of identifying active users and estimating their parameters and data in a random-access system where users are continuously entering and leaving the system. The tool whose use we advocate is Random-Set Theory: applying this, we derive optimum receivers in an environment where the set of transmitters comprises an unknown number of elements. In addition, we can derive Bayesian-filter equations which describe the evolution with time of the a posteriori probability density of the unknown user parameters, and use this density to derive optimum detectors. In this paper we restrict ourselves to interferer identification and data detection, while in a companion paper we shall examine the more complex problem of estimating users' parameters.<|reference_end|> | arxiv | @article{biglieri2007multiuser,
title={Multiuser detection in a dynamic environment Part I: User identification
and data detection},
author={Ezio Biglieri and Marco Lops},
journal={arXiv preprint arXiv:0705.3344},
year={2007},
doi={10.1109/TIT.2007.903115},
archivePrefix={arXiv},
eprint={0705.3344},
primaryClass={cs.IT math.IT}
} | biglieri2007multiuser |
arxiv-414 | 0705.3360 | The Road to Quantum Artificial Intelligence | <|reference_start|>The Road to Quantum Artificial Intelligence: This paper overviews the basic principles and recent advances in the emerging field of Quantum Computation (QC), highlighting its potential application to Artificial Intelligence (AI). The paper provides a very brief introduction to basic QC issues like quantum registers, quantum gates and quantum algorithms and then it presents references, ideas and research guidelines on how QC can be used to deal with some basic AI problems, such as search and pattern matching, as soon as quantum computers become widely available.<|reference_end|> | arxiv | @article{sgarbas2007the,
title={The Road to Quantum Artificial Intelligence},
author={Kyriakos N. Sgarbas},
journal={In: T.S.Papatheodorou, D.N.Christodoulakis and N.N.Karanikolas
(eds), "Current Trends in Informatics", Vol.A, pp.469-477, New Technologies
Publications, Athens, 2007 (SET 978-960-89784-0-9)},
year={2007},
archivePrefix={arXiv},
eprint={0705.3360},
primaryClass={cs.AI}
} | sgarbas2007the |
arxiv-415 | 0705.3466 | Open Access Publishing in Particle Physics: A Brief Introduction for the non-Expert | <|reference_start|>Open Access Publishing in Particle Physics: A Brief Introduction for the non-Expert: Open Access to particle physics literature does not sound particularly new or exciting, since particle physicists have been reading preprints for decades, and arXiv.org for 15 years. However new movements in Europe are attempting to make the peer-reviewed literature of the field fully Open Access. This is not a new movement, nor is it restricted to this field. However, given the field's history of preprints and eprints, it is well suited to a change to a fully Open Access publishing model. Data shows that 90% of HEP published literature is freely available online, meaning that HEP libraries have little need for expensive journal subscriptions. As libraries begin to cancel journal subscriptions, the peer review process will lose its primary source of funding. Open Access publishing models can potentially address this issue. European physicists and funding agencies are proposing a consortium, SCOAP3, that might solve many of the objections to traditional Open Access publishing models in Particle Physics. These proposed changes should be viewed as a starting point for a serious look at the field's publication model, and are at least worthy of attention, if not adoption.<|reference_end|> | arxiv | @article{brooks2007open,
title={Open Access Publishing in Particle Physics: A Brief Introduction for the
non-Expert},
author={Travis C. Brooks},
journal={arXiv preprint arXiv:0705.3466},
year={2007},
number={SLAC-PUB-12507},
archivePrefix={arXiv},
eprint={0705.3466},
primaryClass={cs.DL}
} | brooks2007open |
arxiv-416 | 0705.3468 | Linear Tabling Strategies and Optimizations | <|reference_start|>Linear Tabling Strategies and Optimizations: Recently, the iterative approach named linear tabling has received considerable attention because of its simplicity, ease of implementation, and good space efficiency. Linear tabling is a framework from which different methods can be derived based on the strategies used in handling looping subgoals. One decision concerns when answers are consumed and returned. This paper describes two strategies, namely, {\it lazy} and {\it eager} strategies, and compares them both qualitatively and quantitatively. The results indicate that, while the lazy strategy has good locality and is well suited for finding all solutions, the eager strategy is comparable in speed with the lazy strategy and is well suited for programs with cuts. Linear tabling relies on depth-first iterative deepening rather than suspension to compute fixpoints. Each cluster of inter-dependent subgoals as represented by a top-most looping subgoal is iteratively evaluated until no subgoal in it can produce any new answers. Naive re-evaluation of all looping subgoals, albeit simple, may be computationally unacceptable. In this paper, we also introduce semi-naive optimization, an effective technique employed in bottom-up evaluation of logic programs to avoid redundant joins of answers, into linear tabling. We give the conditions for the technique to be safe (i.e. sound and complete) and propose an optimization technique called {\it early answer promotion} to enhance its effectiveness. Benchmarking in B-Prolog demonstrates that with this optimization linear tabling compares favorably well in speed with the state-of-the-art implementation of SLG.<|reference_end|> | arxiv | @article{zhou2007linear,
title={Linear Tabling Strategies and Optimizations},
author={Neng-Fa Zhou and Taisuke Sato and Yi-Dong Shen},
journal={arXiv preprint arXiv:0705.3468},
year={2007},
archivePrefix={arXiv},
eprint={0705.3468},
primaryClass={cs.PL}
} | zhou2007linear |
arxiv-417 | 0705.3487 | Linearly bounded infinite graphs | <|reference_start|>Linearly bounded infinite graphs: Linearly bounded Turing machines have been mainly studied as acceptors for context-sensitive languages. We define a natural class of infinite automata representing their observable computational behavior, called linearly bounded graphs. These automata naturally accept the same languages as the linearly bounded machines defining them. We present some of their structural properties as well as alternative characterizations in terms of rewriting systems and context-sensitive transductions. Finally, we compare these graphs to rational graphs, which are another class of automata accepting the context-sensitive languages, and prove that in the bounded-degree case, rational graphs are a strict sub-class of linearly bounded graphs.<|reference_end|> | arxiv | @article{carayol2007linearly,
title={Linearly bounded infinite graphs},
author={Arnaud Carayol (IRISA) and Antoine Meyer (LIAFA)},
journal={Acta Informatica 43, 4 (25/08/2006) p. 265-292},
year={2007},
doi={10.1007/s00236-006-0022-z},
archivePrefix={arXiv},
eprint={0705.3487},
primaryClass={cs.LO}
} | carayol2007linearly |
arxiv-418 | 0705.3503 | Making Random Choices Invisible to the Scheduler | <|reference_start|>Making Random Choices Invisible to the Scheduler: When dealing with process calculi and automata which express both nondeterministic and probabilistic behavior, it is customary to introduce the notion of scheduler to solve the nondeterminism. It has been observed that for certain applications, notably those in security, the scheduler needs to be restricted so not to reveal the outcome of the protocol's random choices, or otherwise the model of adversary would be too strong even for ``obviously correct'' protocols. We propose a process-algebraic framework in which the control on the scheduler can be specified in syntactic terms, and we show how to apply it to solve the problem mentioned above. We also consider the definition of (probabilistic) may and must preorders, and we show that they are precongruences with respect to the restricted schedulers. Furthermore, we show that all the operators of the language, except replication, distribute over probabilistic summation, which is a useful property for verification.<|reference_end|> | arxiv | @article{chatzikokolakis2007making,
title={Making Random Choices Invisible to the Scheduler},
author={Konstantinos Chatzikokolakis and Catuscia Palamidessi},
journal={arXiv preprint arXiv:0705.3503},
year={2007},
archivePrefix={arXiv},
eprint={0705.3503},
primaryClass={cs.CR cs.LO}
} | chatzikokolakis2007making |
arxiv-419 | 0705.3555 | Multidimensional Coded Modulation in Block-Fading Channnels | <|reference_start|>Multidimensional Coded Modulation in Block-Fading Channnels: We study the problem of constructing coded modulation schemes over multidimensional signal sets in Nakagami-$m$ block-fading channels. In particular, we consider the optimal diversity reliability exponent of the error probability when the multidimensional constellation is obtained as the rotation of classical complex-plane signal constellations. We show that multidimensional rotations of full dimension achieve the optimal diversity reliability exponent, also achieved by Gaussian constellations. Multidimensional rotations of full dimension induce a large decoding complexity, and in some cases it might be beneficial to use multiple rotations of smaller dimension. We also study the diversity reliability exponent in this case, which yields the optimal rate-diversity-complexity tradeoff in block-fading channels with discrete inputs.<|reference_end|> | arxiv | @article{fabregas2007multidimensional,
title={Multidimensional Coded Modulation in Block-Fading Channnels},
author={Albert Guillen i Fabregas and Giuseppe Caire},
journal={arXiv preprint arXiv:0705.3555},
year={2007},
archivePrefix={arXiv},
eprint={0705.3555},
primaryClass={cs.IT math.IT}
} | fabregas2007multidimensional |
arxiv-420 | 0705.3561 | Generalizing Consistency and other Constraint Properties to Quantified Constraints | <|reference_start|>Generalizing Consistency and other Constraint Properties to Quantified Constraints: Quantified constraints and Quantified Boolean Formulae are typically much more difficult to reason with than classical constraints, because quantifier alternation makes the usual notion of solution inappropriate. As a consequence, basic properties of Constraint Satisfaction Problems (CSP), such as consistency or substitutability, are not completely understood in the quantified case. These properties are important because they are the basis of most of the reasoning methods used to solve classical (existentially quantified) constraints, and one would like to benefit from similar reasoning methods in the resolution of quantified constraints. In this paper, we show that most of the properties that are used by solvers for CSP can be generalized to quantified CSP. This requires a re-thinking of a number of basic concepts; in particular, we propose a notion of outcome that generalizes the classical notion of solution and on which all definitions are based. We propose a systematic study of the relations which hold between these properties, as well as complexity results regarding the decision of these properties. Finally, and since these problems are typically intractable, we generalize the approach used in CSP and propose weaker, easier to check notions based on locality, which allow to detect these properties incompletely but in polynomial time.<|reference_end|> | arxiv | @article{bordeaux2007generalizing,
title={Generalizing Consistency and other Constraint Properties to Quantified
Constraints},
author={Lucas Bordeaux and Marco Cadoli and Toni Mancini},
journal={arXiv preprint arXiv:0705.3561},
year={2007},
archivePrefix={arXiv},
eprint={0705.3561},
primaryClass={cs.LO cs.AI}
} | bordeaux2007generalizing |
arxiv-421 | 0705.3593 | MI image registration using prior knowledge | <|reference_start|>MI image registration using prior knowledge: Subtraction of aligned images is a means to assess changes in a wide variety of clinical applications. In this paper we explore the information theoretical origin of Mutual Information (MI), which is based on Shannon's entropy.However, the interpretation of standard MI registration as a communication channel suggests that MI is too restrictive a criterion. In this paper the concept of Mutual Information (MI) is extended to (Normalized) Focussed Mutual Information (FMI) to incorporate prior knowledge to overcome some shortcomings of MI. We use this to develop new methodologies to successfully address specific registration problems, the follow-up of dental restorations, cephalometry, and the monitoring of implants.<|reference_end|> | arxiv | @article{jacquet2007mi,
title={MI image registration using prior knowledge},
author={W. Jacquet and P. de Groen},
journal={arXiv preprint arXiv:0705.3593},
year={2007},
archivePrefix={arXiv},
eprint={0705.3593},
primaryClass={cs.CV}
} | jacquet2007mi |
arxiv-422 | 0705.3610 | A Logic of Reachable Patterns in Linked Data-Structures | <|reference_start|>A Logic of Reachable Patterns in Linked Data-Structures: We define a new decidable logic for expressing and checking invariants of programs that manipulate dynamically-allocated objects via pointers and destructive pointer updates. The main feature of this logic is the ability to limit the neighborhood of a node that is reachable via a regular expression from a designated node. The logic is closed under boolean operations (entailment, negation) and has a finite model property. The key technical result is the proof of decidability. We show how to express precondition, postconditions, and loop invariants for some interesting programs. It is also possible to express properties such as disjointness of data-structures, and low-level heap mutations. Moreover, our logic can express properties of arbitrary data-structures and of an arbitrary number of pointer fields. The latter provides a way to naturally specify postconditions that relate the fields on entry to a procedure to the fields on exit. Therefore, it is possible to use the logic to automatically prove partial correctness of programs performing low-level heap mutations.<|reference_end|> | arxiv | @article{yorsh2007a,
title={A Logic of Reachable Patterns in Linked Data-Structures},
author={Greta Yorsh, Alexander Rabinovich, Mooly Sagiv, Antoine Meyer (LIAFA),
Ahmed Bouajjani (LIAFA)},
journal={Foundations of Software Science and Computation Structures
(29/03/2006) p. 94-110},
year={2007},
doi={10.1007/11690634_7},
archivePrefix={arXiv},
eprint={0705.3610},
primaryClass={cs.LO}
} | yorsh2007a |
arxiv-423 | 0705.3616 | On How Developers Test Open Source Software Systems | <|reference_start|>On How Developers Test Open Source Software Systems: Engineering software systems is a multidisciplinary activity, whereby a number of artifacts must be created - and maintained - synchronously. In this paper we investigate whether production code and the accompanying tests co-evolve by exploring a project's versioning system, code coverage reports and size-metrics. Three open source case studies teach us that testing activities usually start later on during the lifetime and are more "phased", although we did not observe increasing testing activity before releases. Furthermore, we note large differences in the levels of test coverage given the proportion of test code.<|reference_end|> | arxiv | @article{zaidman2007on,
title={On How Developers Test Open Source Software Systems},
author={Andy Zaidman and Bart Van Rompaey and Serge Demeyer and Arie van Deursen},
journal={arXiv preprint arXiv:0705.3616},
year={2007},
number={TUD-SERG-2007-012},
archivePrefix={arXiv},
eprint={0705.3616},
primaryClass={cs.SE}
} | zaidman2007on |
arxiv-424 | 0705.3631 | Triple-loop networks with arbitrarily many minimum distance diagrams | <|reference_start|>Triple-loop networks with arbitrarily many minimum distance diagrams: Minimum distance diagrams are a way to encode the diameter and routing information of multi-loop networks. For the widely studied case of double-loop networks, it is known that each network has at most two such diagrams and that they have a very definite form "L-shape''. In contrast, in this paper we show that there are triple-loop networks with an arbitrarily big number of associated minimum distance diagrams. For doing this, we build-up on the relations between minimum distance diagrams and monomial ideals.<|reference_end|> | arxiv | @article{sabariego2007triple-loop,
title={Triple-loop networks with arbitrarily many minimum distance diagrams},
author={Pilar Sabariego and Francisco Santos},
journal={Discrete Mathematics, 309(6), April 2009, 1672-1684.},
year={2007},
doi={10.1016/j.disc.2008.02.047},
archivePrefix={arXiv},
eprint={0705.3631},
primaryClass={math.CO cs.DM math.OC}
} | sabariego2007triple-loop |
arxiv-425 | 0705.3644 | Subjective Information Measure and Rate Fidelity Theory | <|reference_start|>Subjective Information Measure and Rate Fidelity Theory: Using fish-covering model, this paper intuitively explains how to extend Hartley's information formula to the generalized information formula step by step for measuring subjective information: metrical information (such as conveyed by thermometers), sensory information (such as conveyed by color vision), and semantic information (such as conveyed by weather forecasts). The pivotal step is to differentiate condition probability and logical condition probability of a message. The paper illustrates the rationality of the formula, discusses the coherence of the generalized information formula and Popper's knowledge evolution theory. For optimizing data compression, the paper discusses rate-of-limiting-errors and its similarity to complexity-distortion based on Kolmogorov's complexity theory, and improves the rate-distortion theory into the rate-fidelity theory by replacing Shannon's distortion with subjective mutual information. It is proved that both the rate-distortion function and the rate-fidelity function are equivalent to a rate-of-limiting-errors function with a group of fuzzy sets as limiting condition, and can be expressed by a formula of generalized mutual information for lossy coding, or by a formula of generalized entropy for lossless coding. By analyzing the rate-fidelity function related to visual discrimination and digitized bits of pixels of images, the paper concludes that subjective information is less than or equal to objective (Shannon's) information; there is an optimal matching point at which two kinds of information are equal; the matching information increases with visual discrimination (defined by confusing probability) rising; for given visual discrimination, too high resolution of images or too much objective information is wasteful.<|reference_end|> | arxiv | @article{lu2007subjective,
title={Subjective Information Measure and Rate Fidelity Theory},
author={Chenguang Lu},
journal={arXiv preprint arXiv:0705.3644},
year={2007},
archivePrefix={arXiv},
eprint={0705.3644},
primaryClass={cs.IT cs.HC math.IT}
} | lu2007subjective |
arxiv-426 | 0705.3669 | Structural Health Monitoring Using Neural Network Based Vibrational System Identification | <|reference_start|>Structural Health Monitoring Using Neural Network Based Vibrational System Identification: Composite fabrication technologies now provide the means for producing high-strength, low-weight panels, plates, spars and other structural components which use embedded fiber optic sensors and piezoelectric transducers. These materials, often referred to as smart structures, make it possible to sense internal characteristics, such as delaminations or structural degradation. In this effort we use neural network based techniques for modeling and analyzing dynamic structural information for recognizing structural defects. This yields an adaptable system which gives a measure of structural integrity for composite structures.<|reference_end|> | arxiv | @article{sofge2007structural,
title={Structural Health Monitoring Using Neural Network Based Vibrational
System Identification},
author={Donald A. Sofge},
journal={D. Sofge, "Structural Health Monitoring Using Neural Network Based
Vibrational System Identification," In Proceedings of the Australia-New
Zealand Conference on Intelligent Information Systems, pp. 91-94, IEEE, 1994},
year={2007},
doi={10.1109/ANZIIS.1994.396943},
archivePrefix={arXiv},
eprint={0705.3669},
primaryClass={cs.NE cs.CV cs.SD}
} | sofge2007structural |
arxiv-427 | 0705.3677 | Distributed Transmit Diversity in Relay Networks | <|reference_start|>Distributed Transmit Diversity in Relay Networks: We analyze fading relay networks, where a single-antenna source-destination terminal pair communicates through a set of half-duplex single-antenna relays using a two-hop protocol with linear processing at the relay level. A family of relaying schemes is presented which achieves the entire optimal diversity-multiplexing (DM) tradeoff curve. As a byproduct of our analysis, it follows that delay diversity and phase-rolling at the relay level are optimal with respect to the entire DM-tradeoff curve, provided the delays and the modulation frequencies, respectively, are chosen appropriately.<|reference_end|> | arxiv | @article{akçaba2007distributed,
title={Distributed Transmit Diversity in Relay Networks},
author={Cemal Ak{\c{c}}aba and Patrick Kuppinger and Helmut B{\"o}lcskei},
journal={arXiv preprint arXiv:0705.3677},
year={2007},
archivePrefix={arXiv},
eprint={0705.3677},
primaryClass={cs.IT math.IT}
} | akçaba2007distributed |
arxiv-428 | 0705.3683 | Power-Efficient Direct-Voting Assurance for Data Fusion in Wireless Sensor Networks | <|reference_start|>Power-Efficient Direct-Voting Assurance for Data Fusion in Wireless Sensor Networks: Wireless sensor networks place sensors into an area to collect data and send them back to a base station. Data fusion, which fuses the collected data before they are sent to the base station, is usually implemented over the network. Since the sensor is typically placed in locations accessible to malicious attackers, information assurance of the data fusion process is very important. A witness-based approach has been proposed to validate the fusion data. In this approach, the base station receives the fusion data and "votes" on the data from a randomly chosen sensor node. The vote comes from other sensor nodes, called "witnesses," to verify the correctness of the fusion data. Because the base station obtains the vote through the chosen node, the chosen node could forge the vote if it is compromised. Thus, the witness node must encrypt the vote to prevent this forgery. Compared with the vote, the encryption requires more bits, increasing transmission burden from the chosen node to the base station. The chosen node consumes more power. This work improves the witness-based approach using direct voting mechanism such that the proposed scheme has better performance in terms of assurance, overhead, and delay. The witness node transmits the vote directly to the base station. Forgery is not a problem in this scheme. Moreover, fewer bits are necessary to represent the vote, significantly reducing the power consumption. Performance analysis and simulation results indicate that the proposed approach can achieve a 40 times better overhead than the witness-based approach.<|reference_end|> | arxiv | @article{pai2007power-efficient,
title={Power-Efficient Direct-Voting Assurance for Data Fusion in Wireless
Sensor Networks},
author={H.-T. Pai and Y. S. Han},
journal={arXiv preprint arXiv:0705.3683},
year={2007},
archivePrefix={arXiv},
eprint={0705.3683},
primaryClass={cs.CR cs.DC cs.NI}
} | pai2007power-efficient |
arxiv-429 | 0705.3693 | Morphing Ensemble Kalman Filters | <|reference_start|>Morphing Ensemble Kalman Filters: A new type of ensemble filter is proposed, which combines an ensemble Kalman filter (EnKF) with the ideas of morphing and registration from image processing. This results in filters suitable for nonlinear problems whose solutions exhibit moving coherent features, such as thin interfaces in wildfire modeling. The ensemble members are represented as the composition of one common state with a spatial transformation, called registration mapping, plus a residual. A fully automatic registration method is used that requires only gridded data, so the features in the model state do not need to be identified by the user. The morphing EnKF operates on a transformed state consisting of the registration mapping and the residual. Essentially, the morphing EnKF uses intermediate states obtained by morphing instead of linear combinations of the states.<|reference_end|> | arxiv | @article{beezley2007morphing,
title={Morphing Ensemble Kalman Filters},
author={Jonathan D. Beezley and Jan Mandel},
journal={arXiv preprint arXiv:0705.3693},
year={2007},
doi={10.1111/j.1600-0870.2007.00275.x},
number={UCDHSC CCM Report 240},
archivePrefix={arXiv},
eprint={0705.3693},
primaryClass={math.DS cs.CV math.ST physics.ao-ph stat.ME stat.TH}
} | beezley2007morphing |
arxiv-430 | 0705.3740 | Optimal Iris Fuzzy Sketches | <|reference_start|>Optimal Iris Fuzzy Sketches: Fuzzy sketches, introduced as a link between biometry and cryptography, are a way of handling biometric data matching as an error correction issue. We focus here on iris biometrics and look for the best error-correcting code in that respect. We show that two-dimensional iterative min-sum decoding leads to results near the theoretical limits. In particular, we experiment our techniques on the Iris Challenge Evaluation (ICE) database and validate our findings.<|reference_end|> | arxiv | @article{bringer2007optimal,
title={Optimal Iris Fuzzy Sketches},
author={J. Bringer and H. Chabanne and G. Cohen and B. Kindarji and G. Z{\'e}mor},
journal={Biometrics: Theory, Applications, and Systems, 2007. BTAS 2007.
First IEEE International Conference on},
year={2007},
doi={10.1109/BTAS.2007.4401904},
archivePrefix={arXiv},
eprint={0705.3740},
primaryClass={cs.CR}
} | bringer2007optimal |
arxiv-431 | 0705.3748 | On the Obfuscation Complexity of Planar Graphs | <|reference_start|>On the Obfuscation Complexity of Planar Graphs: Being motivated by John Tantalo's Planarity Game, we consider straight line plane drawings of a planar graph $G$ with edge crossings and wonder how obfuscated such drawings can be. We define $obf(G)$, the obfuscation complexity of $G$, to be the maximum number of edge crossings in a drawing of $G$. Relating $obf(G)$ to the distribution of vertex degrees in $G$, we show an efficient way of constructing a drawing of $G$ with at least $obf(G)/3$ edge crossings. We prove bounds $(\delta(G)^2/24-o(1))n^2 < \obf G <3 n^2$ for an $n$-vertex planar graph $G$ with minimum vertex degree $\delta(G)\ge 2$. The shift complexity of $G$, denoted by $shift(G)$, is the minimum number of vertex shifts sufficient to eliminate all edge crossings in an arbitrarily obfuscated drawing of $G$ (after shifting a vertex, all incident edges are supposed to be redrawn correspondingly). If $\delta(G)\ge 3$, then $shift(G)$ is linear in the number of vertices due to the known fact that the matching number of $G$ is linear. However, in the case $\delta(G)\ge2$ we notice that $shift(G)$ can be linear even if the matching number is bounded. As for computational complexity, we show that, given a drawing $D$ of a planar graph, it is NP-hard to find an optimum sequence of shifts making $D$ crossing-free.<|reference_end|> | arxiv | @article{verbitsky2007on,
title={On the Obfuscation Complexity of Planar Graphs},
author={Oleg Verbitsky},
journal={arXiv preprint arXiv:0705.3748},
year={2007},
archivePrefix={arXiv},
eprint={0705.3748},
primaryClass={cs.DM cs.CC}
} | verbitsky2007on |
arxiv-432 | 0705.3751 | On the expressive power of planar perfect matching and permanents of bounded treewidth matrices | <|reference_start|>On the expressive power of planar perfect matching and permanents of bounded treewidth matrices: Valiant introduced some 25 years ago an algebraic model of computation along with the complexity classes VP and VNP, which can be viewed as analogues of the classical classes P and NP. They are defined using non-uniform sequences of arithmetic circuits and provides a framework to study the complexity for sequences of polynomials. Prominent examples of difficult (that is, VNP-complete) problems in this model includes the permanent and hamiltonian polynomials. While the permanent and hamiltonian polynomials in general are difficult to evaluate, there have been research on which special cases of these polynomials admits efficient evaluation. For instance, Barvinok has shown that if the underlying matrix has bounded rank, both the permanent and the hamiltonian polynomials can be evaluated in polynomial time, and thus are in VP. Courcelle, Makowsky and Rotics have shown that for matrices of bounded treewidth several difficult problems (including evaluating the permanent and hamiltonian polynomials) can be solved efficiently. An earlier result of this flavour is Kasteleyn's theorem which states that the sum of weights of perfect matchings of a planar graph can be computed in polynomial time, and thus is in VP also. For general graphs this problem is VNP-complete. In this paper we investigate the expressive power of the above results. We show that the permanent and hamiltonian polynomials for matrices of bounded treewidth both are equivalent to arithmetic formulas. Also, arithmetic weakly skew circuits are shown to be equivalent to the sum of weights of perfect matchings of planar graphs.<|reference_end|> | arxiv | @article{lyaudet2007on,
title={On the expressive power of planar perfect matching and permanents of
bounded treewidth matrices},
author={Laurent Lyaudet (LIP) and Pascal Koiran (LIP) and Uffe Flarup (IMADA)},
journal={arXiv preprint arXiv:0705.3751},
year={2007},
archivePrefix={arXiv},
eprint={0705.3751},
primaryClass={cs.DM cs.CC}
} | lyaudet2007on |
arxiv-433 | 0705.3766 | On complexity of optimized crossover for binary representations | <|reference_start|>On complexity of optimized crossover for binary representations: We consider the computational complexity of producing the best possible offspring in a crossover, given two solutions of the parents. The crossover operators are studied on the class of Boolean linear programming problems, where the Boolean vector of variables is used as the solution representation. By means of efficient reductions of the optimized gene transmitting crossover problems (OGTC) we show the polynomial solvability of the OGTC for the maximum weight set packing problem, the minimum weight set partition problem and for one of the versions of the simple plant location problem. We study a connection between the OGTC for linear Boolean programming problem and the maximum weight independent set problem on 2-colorable hypergraph and prove the NP-hardness of several special cases of the OGTC problem in Boolean linear programming.<|reference_end|> | arxiv | @article{eremeev2007on,
title={On complexity of optimized crossover for binary representations},
author={Anton Eremeev},
journal={arXiv preprint arXiv:0705.3766},
year={2007},
archivePrefix={arXiv},
eprint={0705.3766},
primaryClass={cs.NE cs.AI}
} | eremeev2007on |
arxiv-434 | 0705.3820 | Maximizing Maximal Angles for Plane Straight-Line Graphs | <|reference_start|>Maximizing Maximal Angles for Plane Straight-Line Graphs: Let $G=(S, E)$ be a plane straight-line graph on a finite point set $S\subset\R^2$ in general position. The incident angles of a vertex $p \in S$ of $G$ are the angles between any two edges of $G$ that appear consecutively in the circular order of the edges incident to $p$. A plane straight-line graph is called $\phi$-open if each vertex has an incident angle of size at least $\phi$. In this paper we study the following type of question: What is the maximum angle $\phi$ such that for any finite set $S\subset\R^2$ of points in general position we can find a graph from a certain class of graphs on $S$ that is $\phi$-open? In particular, we consider the classes of triangulations, spanning trees, and paths on $S$ and give tight bounds in most cases.<|reference_end|> | arxiv | @article{aichholzer2007maximizing,
title={Maximizing Maximal Angles for Plane Straight-Line Graphs},
author={Oswin Aichholzer and Thomas Hackl and Michael Hoffmann and Clemens Huemer
  and Attila Por and Francisco Santos and Bettina Speckmann and Birgit Vogtenhuber},
journal={In "Algorithms and Data Structures, WADS 2007, Halifax, Canada,
August 15-17, 2007", Frank Dehne et al. (Eds.), LNCS 4619, Springer-Verlag,
2007, pp. 458-469},
year={2007},
doi={10.1007/978-3-540-73951-7_40},
archivePrefix={arXiv},
eprint={0705.3820},
primaryClass={cs.CG cs.DM math.CO}
} | aichholzer2007maximizing |
arxiv-435 | 0705.3888 | Symbolic Reachability Analysis of Higher-Order Context-Free Processes | <|reference_start|>Symbolic Reachability Analysis of Higher-Order Context-Free Processes: We consider the problem of symbolic reachability analysis of higher-order context-free processes. These models are generalizations of the context-free processes (also called BPA processes) where each process manipulates a data structure which can be seen as a nested stack of stacks. Our main result is that, for any higher-order context-free process, the set of all predecessors of a given regular set of configurations is regular and effectively constructible. This result generalizes the analogous result which is known for level 1 context-free processes. We show that this result holds also in the case of backward reachability analysis under a regular constraint on configurations. As a corollary, we obtain a symbolic model checking algorithm for the temporal logic E(U,X) with regular atomic predicates, i.e., the fragment of CTL restricted to the EU and EX modalities.<|reference_end|> | arxiv | @article{bouajjani2007symbolic,
title={Symbolic Reachability Analysis of Higher-Order Context-Free Processes},
author={Ahmed Bouajjani (LIAFA) and Antoine Meyer (LIAFA)},
journal={FSTTCS 2004: Foundations of Software Technology and Theoretical
Computer Science (24/11/2004) 135-147},
year={2007},
doi={10.1007/b104325},
archivePrefix={arXiv},
eprint={0705.3888},
primaryClass={cs.LO}
} | bouajjani2007symbolic |
arxiv-436 | 0705.3895 | Towards Understanding the Origin of Genetic Languages | <|reference_start|>Towards Understanding the Origin of Genetic Languages: Molecular biology is a nanotechnology that works--it has worked for billions of years and in an amazing variety of circumstances. At its core is a system for acquiring, processing and communicating information that is universal, from viruses and bacteria to human beings. Advances in genetics and experience in designing computers have taken us to a stage where we can understand the optimisation principles at the root of this system, from the availability of basic building blocks to the execution of tasks. The languages of DNA and proteins are argued to be the optimal solutions to the information processing tasks they carry out. The analysis also suggests simpler predecessors to these languages, and provides fascinating clues about their origin. Obviously, a comprehensive unraveling of the puzzle of life would have a lot to say about what we may design or convert ourselves into.<|reference_end|> | arxiv | @article{patel2007towards,
title={Towards Understanding the Origin of Genetic Languages},
author={Apoorva D. Patel},
journal={arXiv preprint arXiv:0705.3895},
year={2007},
doi={10.1142/9781848162556_0010},
archivePrefix={arXiv},
eprint={0705.3895},
primaryClass={q-bio.GN cs.IT math.IT physics.bio-ph quant-ph}
} | patel2007towards |
arxiv-437 | 0705.3949 | Translating a first-order modal language to relational algebra | <|reference_start|>Translating a first-order modal language to relational algebra: This paper is about Kripke structures that are inside a relational database and queried with a modal language. At first the modal language that is used is introduced, followed by a definition of the database and relational algebra. Based on these definitions two things are presented: a mapping from components of the modal structure to a relational database schema and instance, and a translation from queries in the modal language to relational algebra queries.<|reference_end|> | arxiv | @article{havinga2007translating,
title={Translating a first-order modal language to relational algebra},
author={Yeb Havinga},
journal={arXiv preprint arXiv:0705.3949},
year={2007},
archivePrefix={arXiv},
eprint={0705.3949},
primaryClass={cs.LO cs.DB}
} | havinga2007translating |
arxiv-438 | 0705.3990 | Interior Point Decoding for Linear Vector Channels | <|reference_start|>Interior Point Decoding for Linear Vector Channels: In this paper, a novel decoding algorithm for low-density parity-check (LDPC) codes based on convex optimization is presented. The decoding algorithm, called interior point decoding, is designed for linear vector channels. The linear vector channels include many practically important channels such as inter symbol interference channels and partial response channels. It is shown that the maximum likelihood decoding (MLD) rule for a linear vector channel can be relaxed to a convex optimization problem, which is called a relaxed MLD problem. The proposed decoding algorithm is based on a numerical optimization technique so called interior point method with barrier function. Approximate variations of the gradient descent and the Newton methods are used to solve the convex optimization problem. In a decoding process of the proposed algorithm, a search point always lies in the fundamental polytope defined based on a low-density parity-check matrix. Compared with a convectional joint message passing decoder, the proposed decoding algorithm achieves better BER performance with less complexity in the case of partial response channels in many cases.<|reference_end|> | arxiv | @article{wadayama2007interior,
title={Interior Point Decoding for Linear Vector Channels},
author={Tadashi Wadayama},
journal={arXiv preprint arXiv:0705.3990},
year={2007},
doi={10.1088/1742-6596/95/1/012009},
archivePrefix={arXiv},
eprint={0705.3990},
primaryClass={cs.IT math.IT}
} | wadayama2007interior |
arxiv-439 | 0705.3992 | Average Stopping Set Weight Distribution of Redundant Random Matrix Ensembles | <|reference_start|>Average Stopping Set Weight Distribution of Redundant Random Matrix Ensembles: In this paper, redundant random matrix ensembles (abbreviated as redundant random ensembles) are defined and their stopping set (SS) weight distributions are analyzed. A redundant random ensemble consists of a set of binary matrices with linearly dependent rows. These linearly dependent rows (redundant rows) significantly reduce the number of stopping sets of small size. An upper and lower bound on the average SS weight distribution of the redundant random ensembles are shown. From these bounds, the trade-off between the number of redundant rows (corresponding to decoding complexity of BP on BEC) and the critical exponent of the asymptotic growth rate of SS weight distribution (corresponding to decoding performance) can be derived. It is shown that, in some cases, a dense matrix with linearly dependent rows yields asymptotically (i.e., in the regime of small erasure probability) better performance than regular LDPC matrices with comparable parameters.<|reference_end|> | arxiv | @article{wadayama2007average,
title={Average Stopping Set Weight Distribution of Redundant Random Matrix
Ensembles},
author={Tadashi Wadayama},
journal={arXiv preprint arXiv:0705.3992},
year={2007},
doi={10.1109/ISIT.2007.4557663},
archivePrefix={arXiv},
eprint={0705.3992},
primaryClass={cs.IT math.IT}
} | wadayama2007average |
arxiv-440 | 0705.3995 | On Undetected Error Probability of Binary Matrix Ensembles | <|reference_start|>On Undetected Error Probability of Binary Matrix Ensembles: In this paper, an analysis of the undetected error probability of ensembles of binary matrices is presented. The ensemble called the Bernoulli ensemble whose members are considered as matrices generated from i.i.d. Bernoulli source is mainly considered here. The main contributions of this work are (i) derivation of the error exponent of the average undetected error probability and (ii) closed form expressions for the variance of the undetected error probability. It is shown that the behavior of the exponent for a sparse ensemble is somewhat different from that for a dense ensemble. Furthermore, as a byproduct of the proof of the variance formula, simple covariance formula of the weight distribution is derived.<|reference_end|> | arxiv | @article{wadayama2007on,
title={On Undetected Error Probability of Binary Matrix Ensembles},
author={Tadashi Wadayama},
journal={arXiv preprint arXiv:0705.3995},
year={2007},
archivePrefix={arXiv},
eprint={0705.3995},
primaryClass={cs.IT math.IT}
} | wadayama2007on |
arxiv-441 | 0705.4045 | The use of the logarithm of the variate in the calculation of differential entropy among certain related statistical distributions | <|reference_start|>The use of the logarithm of the variate in the calculation of differential entropy among certain related statistical distributions: This paper demonstrates that basic statistics (mean, variance) of the logarithm of the variate itself can be used in the calculation of differential entropy among random variables known to be multiples and powers of a common underlying variate. For the same set of distributions, the variance of the differential self-information is shown also to be a function of statistics of the logarithmic variate. Then entropy and its "variance" can be estimated using only statistics of the logarithmic variate plus constants, without reference to the traditional parameters of the variate.<|reference_end|> | arxiv | @article{eccardt2007the,
title={The use of the logarithm of the variate in the calculation of
differential entropy among certain related statistical distributions},
author={Thomas M. Eccardt},
journal={arXiv preprint arXiv:0705.4045},
year={2007},
archivePrefix={arXiv},
eprint={0705.4045},
primaryClass={cs.IT math.IT}
} | eccardt2007the |
arxiv-442 | 0705.4064 | On Term Rewriting Systems Having a Rational Derivation | <|reference_start|>On Term Rewriting Systems Having a Rational Derivation: Several types of term rewriting systems can be distinguished by the way their rules overlap. In particular, we define the classes of prefix, suffix, bottom-up and top-down systems, which generalize similar classes on words. Our aim is to study the derivation relation of such systems (i.e. the reflexive and transitive closure of their rewriting relation) and, if possible, to provide a finite mechanism characterizing it. Using a notion of rational relations based on finite graph grammars, we show that the derivation of any bottom-up, top-down or suffix systems is rational, while it can be non recursive for prefix systems.<|reference_end|> | arxiv | @article{meyer2007on,
title={On Term Rewriting Systems Having a Rational Derivation},
author={Antoine Meyer (LIAFA)},
journal={Foundations of Software Science and Computation Structures
(05/03/2004) 378-392},
year={2007},
doi={10.1007/b95995},
archivePrefix={arXiv},
eprint={0705.4064},
primaryClass={cs.LO}
} | meyer2007on |
arxiv-443 | 0705.4085 | The Distance Geometry of Music | <|reference_start|>The Distance Geometry of Music: We demonstrate relationships between the classic Euclidean algorithm and many other fields of study, particularly in the context of music and distance geometry. Specifically, we show how the structure of the Euclidean algorithm defines a family of rhythms which encompass over forty timelines (\emph{ostinatos}) from traditional world music. We prove that these \emph{Euclidean rhythms} have the mathematical property that their onset patterns are distributed as evenly as possible: they maximize the sum of the Euclidean distances between all pairs of onsets, viewing onsets as points on a circle. Indeed, Euclidean rhythms are the unique rhythms that maximize this notion of \emph{evenness}. We also show that essentially all Euclidean rhythms are \emph{deep}: each distinct distance between onsets occurs with a unique multiplicity, and these multiplicies form an interval $1,2,...,k-1$. Finally, we characterize all deep rhythms, showing that they form a subclass of generated rhythms, which in turn proves a useful property called shelling. All of our results for musical rhythms apply equally well to musical scales. In addition, many of the problems we explore are interesting in their own right as distance geometry problems on the circle; some of the same problems were explored by Erd\H{o}s in the plane.<|reference_end|> | arxiv | @article{demaine2007the,
title={The Distance Geometry of Music},
author={Erik D. Demaine, Francisco Gomez-Martin, Henk Meijer, David Rappaport,
Perouz Taslakian, Godfried T. Toussaint, Terry Winograd, David R. Wood},
journal={arXiv preprint arXiv:0705.4085},
year={2007},
archivePrefix={arXiv},
eprint={0705.4085},
primaryClass={cs.CG}
} | demaine2007the |
arxiv-444 | 0705.4094 | Efficiency and Nash Equilibria in a Scrip System for P2P Networks | <|reference_start|>Efficiency and Nash Equilibria in a Scrip System for P2P Networks: A model of providing service in a P2P network is analyzed. It is shown that by adding a scrip system, a mechanism that admits a reasonable Nash equilibrium that reduces free riding can be obtained. The effect of varying the total amount of money (scrip) in the system on efficiency (i.e., social welfare) is analyzed, and it is shown that by maintaining the appropriate ratio between the total amount of money and the number of agents, efficiency is maximized. The work has implications for many online systems, not only P2P networks but also a wide variety of online forums for which scrip systems are popular, but formal analyses have been lacking.<|reference_end|> | arxiv | @article{friedman2007efficiency,
title={Efficiency and Nash Equilibria in a Scrip System for P2P Networks},
author={Eric j. Friedman, Joseph Y. Halpern, Ian Kash},
journal={arXiv preprint arXiv:0705.4094},
year={2007},
archivePrefix={arXiv},
eprint={0705.4094},
primaryClass={cs.GT}
} | friedman2007efficiency |
arxiv-445 | 0705.4110 | Optimizing Scrip Systems: Efficiency, Crashes, Hoarders, and Altruists | <|reference_start|>Optimizing Scrip Systems: Efficiency, Crashes, Hoarders, and Altruists: We discuss the design of efficient scrip systems and develop tools for empirically analyzing them. For those interested in the empirical study of scrip systems, we demonstrate how characteristics of agents in a system can be inferred from the equilibrium distribution of money. From the perspective of a system designer, we examine the effect of the money supply on social welfare and show that social welfare is maximized by increasing the money supply up to the point that the system experiences a ``monetary crash,'' where money is sufficiently devalued that no agent is willing to perform a service. We also examine the implications of the presence of altruists and hoarders on the performance of the system. While a small number of altruists may improve social welfare, too many can also cause the system to experience a monetary crash, which may be bad for social welfare. Hoarders generally decrease social welfare but, surprisingly, they also promote system stability by helping prevent monetary crashes. In addition, we provide new technical tools for analyzing and computing equilibria by showing that our model exhibits strategic complementarities, which implies that there exist equilibria in pure strategies that can be computed efficiently.<|reference_end|> | arxiv | @article{kash2007optimizing,
title={Optimizing Scrip Systems: Efficiency, Crashes, Hoarders, and Altruists},
author={Ian A. Kash, Eric J. Friedman, Joseph Y. Halpern},
journal={arXiv preprint arXiv:0705.4110},
year={2007},
archivePrefix={arXiv},
eprint={0705.4110},
primaryClass={cs.GT}
} | kash2007optimizing |
arxiv-446 | 0705.4134 | The Battery-Discharge-Model: A Class of Stochastic Finite Automata to Simulate Multidimensional Continued Fraction Expansion | <|reference_start|>The Battery-Discharge-Model: A Class of Stochastic Finite Automata to Simulate Multidimensional Continued Fraction Expansion: We define an infinite stochastic state machine, the Battery-Discharge-Model (BDM), which simulates the behaviour of linear and jump complexity of the continued fraction expansion of multidimensional formal power series, a relevant security measure in the cryptanalysis of stream ciphers. We also obtain finite approximations to the infinite BDM, where polynomially many states suffice to approximate with an exponentially small error the probabilities and averages for linear and jump complexity of M-multisequences of length n over the finite field F_q, for any M, n, q.<|reference_end|> | arxiv | @article{vielhaber2007the,
title={The Battery-Discharge-Model: A Class of Stochastic Finite Automata to
Simulate Multidimensional Continued Fraction Expansion},
author={Michael Vielhaber and Monica del Pilar Canales},
journal={arXiv preprint arXiv:0705.4134},
year={2007},
archivePrefix={arXiv},
eprint={0705.4134},
primaryClass={cs.IT cs.CC cs.CR math.IT}
} | vielhaber2007the |
arxiv-447 | 0705.4138 | The Asymptotic Normalized Linear Complexity of Multisequences | <|reference_start|>The Asymptotic Normalized Linear Complexity of Multisequences: We show that the asymptotic linear complexity of a multisequence a in F_q^\infty that is I := liminf L_a(n)/n and S := limsup L_a(n)/n satisfy the inequalities M/(M+1) <= S <= 1 and M(1-S) <= I <= 1-S/M, if all M sequences have nonzero discrepancy infinitely often, and all pairs (I,S) satisfying these conditions are met by 2^{\aleph_0} multisequences a. This answers an Open Problem by Dai, Imamura, and Yang. Keywords: Linear complexity, multisequence, Battery Discharge Model, isometry.<|reference_end|> | arxiv | @article{vielhaber2007the,
title={The Asymptotic Normalized Linear Complexity of Multisequences},
author={Michael Vielhaber and Monica del Pilar Canales},
journal={arXiv preprint arXiv:0705.4138},
year={2007},
archivePrefix={arXiv},
eprint={0705.4138},
primaryClass={cs.IT cs.CC cs.CR math.IT}
} | vielhaber2007the |
arxiv-448 | 0705.4171 | Grover search algorithm | <|reference_start|>Grover search algorithm: A quantum algorithm is a set of instructions for a quantum computer, however, unlike algorithms in classical computer science their results cannot be guaranteed. A quantum system can undergo two types of operation, measurement and quantum state transformation, operations themselves must be unitary (reversible). Most quantum algorithms involve a series of quantum state transformations followed by a measurement. Currently very few quantum algorithms are known and no general design methodology exists for their construction.<|reference_end|> | arxiv | @article{borbely2007grover,
title={Grover search algorithm},
author={Eva Borbely},
journal={arXiv preprint arXiv:0705.4171},
year={2007},
archivePrefix={arXiv},
eprint={0705.4171},
primaryClass={cs.DS}
} | borbely2007grover |
arxiv-449 | 0705.4185 | Secure Two-party Protocols for Point Inclusion Problem | <|reference_start|>Secure Two-party Protocols for Point Inclusion Problem: It is well known that, in theory, the general secure multi-party computation problem is solvable using circuit evaluation protocols. However, the communication complexity of the resulting protocols depend on the size of the circuit that expresses the functionality to be computed and hence can be impractical. Hence special solutions are needed for specific problems for efficiency reasons. The point inclusion problem in computational geometry is a special multiparty computation and has got many applications. Previous protocols for the secure point inclusion problem are not adequate. In this paper we modify some known solutions to the point inclusion problem in computational geometry to the frame work of secure two-party computation.<|reference_end|> | arxiv | @article{thomas2007secure,
title={Secure Two-party Protocols for Point Inclusion Problem},
author={Tony Thomas},
journal={arXiv preprint arXiv:0705.4185},
year={2007},
archivePrefix={arXiv},
eprint={0705.4185},
primaryClass={cs.CR}
} | thomas2007secure |
arxiv-450 | 0705.4226 | Second-Order Type Isomorphisms Through Game Semantics | <|reference_start|>Second-Order Type Isomorphisms Through Game Semantics: The characterization of second-order type isomorphisms is a purely syntactical problem that we propose to study under the enlightenment of game semantics. We study this question in the case of second-order λ$\mu$-calculus, which can be seen as an extension of system F to classical logic, and for which we define a categorical framework: control hyperdoctrines. Our game model of λ$\mu$-calculus is based on polymorphic arenas (closely related to Hughes' hyperforests) which evolve during the play (following the ideas of Murawski-Ong). We show that type isomorphisms coincide with the "equality" on arenas associated with types. Finally we deduce the equational characterization of type isomorphisms from this equality. We also recover from the same model Roberto Di Cosmo's characterization of type isomorphisms for system F. This approach leads to a geometrical comprehension on the question of second order type isomorphisms, which can be easily extended to some other polymorphic calculi including additional programming features.<|reference_end|> | arxiv | @article{de lataillade2007second-order,
title={Second-Order Type Isomorphisms Through Game Semantics},
author={Joachim De Lataillade (PPS)},
journal={arXiv preprint arXiv:0705.4226},
year={2007},
archivePrefix={arXiv},
eprint={0705.4226},
primaryClass={cs.LO}
} | de lataillade2007second-order |
arxiv-451 | 0705.4228 | Curry-style type Isomorphisms and Game Semantics | <|reference_start|>Curry-style type Isomorphisms and Game Semantics: Curry-style system F, ie. system F with no explicit types in terms, can be seen as a core presentation of polymorphism from the point of view of programming languages. This paper gives a characterisation of type isomorphisms for this language, by using a game model whose intuitions come both from the syntax and from the game semantics universe. The model is composed of: an untyped part to interpret terms, a notion of game to interpret types, and a typed part to express the fact that an untyped strategy plays on a game. By analysing isomorphisms in the model, we prove that the equational system corresponding to type isomorphisms for Curry-style system F is the extension of the equational system for Church-style isomorphisms with a new, non-trivial equation: forall X.A = A[forall Y.Y/X] if X appears only positively in A.<|reference_end|> | arxiv | @article{de lataillade2007curry-style,
title={Curry-style type Isomorphisms and Game Semantics},
author={Joachim De Lataillade (PPS)},
journal={arXiv preprint arXiv:0705.4228},
year={2007},
archivePrefix={arXiv},
eprint={0705.4228},
primaryClass={cs.LO}
} | de lataillade2007curry-style |
arxiv-452 | 0705.4302 | Truecluster matching | <|reference_start|>Truecluster matching: Cluster matching by permuting cluster labels is important in many clustering contexts such as cluster validation and cluster ensemble techniques. The classic approach is to minimize the euclidean distance between two cluster solutions which induces inappropriate stability in certain settings. Therefore, we present the truematch algorithm that introduces two improvements best explained in the crisp case. First, instead of maximizing the trace of the cluster crosstable, we propose to maximize a chi-square transformation of this crosstable. Thus, the trace will not be dominated by the cells with the largest counts but by the cells with the most non-random observations, taking into account the marginals. Second, we suggest a probabilistic component in order to break ties and to make the matching algorithm truly random on random data. The truematch algorithm is designed as a building block of the truecluster framework and scales in polynomial time. First simulation results confirm that the truematch algorithm gives more consistent truecluster results for unequal cluster sizes. Free R software is available.<|reference_end|> | arxiv | @article{oehlschlägel2007truecluster,
title={Truecluster matching},
author={Jens Oehlschl"agel},
journal={arXiv preprint arXiv:0705.4302},
year={2007},
archivePrefix={arXiv},
eprint={0705.4302},
primaryClass={cs.AI}
} | oehlschlägel2007truecluster |
arxiv-453 | 0705.4320 | Defect-Tolerant CMOL Cell Assignment via Satisfiability | <|reference_start|>Defect-Tolerant CMOL Cell Assignment via Satisfiability: We present a CAD framework for CMOL, a hybrid CMOS/ molecular circuit architecture. Our framework first transforms any logically synthesized circuit based on AND/OR/NOT gates to a NOR gate circuit, and then maps the NOR gates to CMOL. We encode the CMOL cell assignment problem as boolean conditions. The boolean constraint is satisfiable if and only if there is a way to map all the NOR gates to the CMOL cells. We further investigate various types of static defects for the CMOL architecture, and propose a reconfiguration technique that can deal with these defects through our CAD framework. This is the first automated framework for CMOL cell assignment, and the first to model several different CMOL static defects. Empirical results show that our approach is efficient and scalable.<|reference_end|> | arxiv | @article{hung2007defect-tolerant,
title={Defect-Tolerant CMOL Cell Assignment via Satisfiability},
author={William N. N. Hung, Changjian Gao, Xiaoyu Song, Dan Hammerstrom},
journal={arXiv preprint arXiv:0705.4320},
year={2007},
archivePrefix={arXiv},
eprint={0705.4320},
primaryClass={cs.DM cs.DS}
} | hung2007defect-tolerant |
arxiv-454 | 0705.4369 | Computing Integer Powers in Floating-Point Arithmetic | <|reference_start|>Computing Integer Powers in Floating-Point Arithmetic: We introduce two algorithms for accurately evaluating powers to a positive integer in floating-point arithmetic, assuming a fused multiply-add (fma) instruction is available. We show that our log-time algorithm always produce faithfully-rounded results, discuss the possibility of getting correctly rounded results, and show that results correctly rounded in double precision can be obtained if extended-precision is available with the possibility to round into double precision (with a single rounding).<|reference_end|> | arxiv | @article{kornerup2007computing,
title={Computing Integer Powers in Floating-Point Arithmetic},
author={Peter Kornerup (IMADA), Vincent Lef`evre (LIP), Jean-Michel Muller
(LIP)},
journal={arXiv preprint arXiv:0705.4369},
year={2007},
archivePrefix={arXiv},
eprint={0705.4369},
primaryClass={cs.NA cs.MS}
} | kornerup2007computing |
arxiv-455 | 0705.4415 | PERCEVAL: a Computer-Driven System for Experimentation on Auditory and Visual Perception | <|reference_start|>PERCEVAL: a Computer-Driven System for Experimentation on Auditory and Visual Perception: Since perception tests are highly time-consuming, there is a need to automate as many operations as possible, such as stimulus generation, procedure control, perception testing, and data analysis. The computer-driven system we are presenting here meets these objectives. To achieve large flexibility, the tests are controlled by scripts. The system's core software resembles that of a lexical-syntactic analyzer, which reads and interprets script files sent to it. The execution sequence (trial) is modified in accordance with the commands and data received. This type of operation provides a great deal of flexibility and supports a wide variety of tests such as auditory-lexical decision making, phoneme monitoring, gating, phonetic categorization, word identification, voice quality, etc. To achieve good performance, we were careful about timing accuracy, which is the greatest problem in computerized perception tests.<|reference_end|> | arxiv | @article{andré2007perceval:,
title={PERCEVAL: a Computer-Driven System for Experimentation on Auditory and
Visual Perception},
author={Carine Andr'e (LPL), Alain Ghio (LPL), Christian Cav'e (LPL),
Bernard Teston (LPL)},
journal={Proceedings of International Congress of Phonetic Sciences (ICPhS)
(2003) 1421-1424},
year={2007},
number={1557},
archivePrefix={arXiv},
eprint={0705.4415},
primaryClass={cs.SE}
} | andré2007perceval: |
arxiv-456 | 0705.4442 | World-set Decompositions: Expressiveness and Efficient Algorithms | <|reference_start|>World-set Decompositions: Expressiveness and Efficient Algorithms: Uncertain information is commonplace in real-world data management scenarios. The ability to represent large sets of possible instances (worlds) while supporting efficient storage and processing is an important challenge in this context. The recent formalism of world-set decompositions (WSDs) provides a space-efficient representation for uncertain data that also supports scalable processing. WSDs are complete for finite world-sets in that they can represent any finite set of possible worlds. For possibly infinite world-sets, we show that a natural generalization of WSDs precisely captures the expressive power of c-tables. We then show that several important decision problems are efficiently solvable on WSDs while they are NP-hard on c-tables. Finally, we give a polynomial-time algorithm for factorizing WSDs, i.e. an efficient algorithm for minimizing such representations.<|reference_end|> | arxiv | @article{olteanu2007world-set,
title={World-set Decompositions: Expressiveness and Efficient Algorithms},
author={Dan Olteanu and Christoph Koch and Lyublena Antova},
journal={arXiv preprint arXiv:0705.4442},
year={2007},
archivePrefix={arXiv},
eprint={0705.4442},
primaryClass={cs.DB}
} | olteanu2007world-set |
arxiv-457 | 0705.4485 | Mixed membership stochastic blockmodels | <|reference_start|>Mixed membership stochastic blockmodels: Observations consisting of measurements on relationships for pairs of objects arise in many settings, such as protein interaction and gene regulatory networks, collections of author-recipient email, and social networks. Analyzing such data with probabilisic models can be delicate because the simple exchangeability assumptions underlying many boilerplate models no longer hold. In this paper, we describe a latent variable model of such data called the mixed membership stochastic blockmodel. This model extends blockmodels for relational data to ones which capture mixed membership latent relational structure, thus providing an object-specific low-dimensional representation. We develop a general variational inference algorithm for fast approximate posterior inference. We explore applications to social and protein interaction networks.<|reference_end|> | arxiv | @article{airoldi2007mixed,
title={Mixed membership stochastic blockmodels},
author={Edoardo M Airoldi, David M Blei, Stephen E Fienberg, Eric P Xing},
journal={Journal of Machine Learning Research, 9, 1981-2014.},
year={2007},
archivePrefix={arXiv},
eprint={0705.4485},
primaryClass={stat.ME cs.LG math.ST physics.soc-ph stat.ML stat.TH}
} | airoldi2007mixed |
arxiv-458 | 0705.4566 | Loop corrections for message passing algorithms in continuous variable models | <|reference_start|>Loop corrections for message passing algorithms in continuous variable models: In this paper we derive the equations for Loop Corrected Belief Propagation on a continuous variable Gaussian model. Using the exactness of the averages for belief propagation for Gaussian models, a different way of obtaining the covariances is found, based on Belief Propagation on cavity graphs. We discuss the relation of this loop correction algorithm to Expectation Propagation algorithms for the case in which the model is no longer Gaussian, but slightly perturbed by nonlinear terms.<|reference_end|> | arxiv | @article{wemmenhove2007loop,
title={Loop corrections for message passing algorithms in continuous variable
models},
author={Bastian Wemmenhove and Bert Kappen},
journal={arXiv preprint arXiv:0705.4566},
year={2007},
archivePrefix={arXiv},
eprint={0705.4566},
primaryClass={cs.AI cs.LG}
} | wemmenhove2007loop |
arxiv-459 | 0705.4584 | Modeling Epidemic Spread in Synthetic Populations - Virtual Plagues in Massively Multiplayer Online Games | <|reference_start|>Modeling Epidemic Spread in Synthetic Populations - Virtual Plagues in Massively Multiplayer Online Games: A virtual plague is a process in which a behavior-affecting property spreads among characters in a Massively Multiplayer Online Game (MMOG). The MMOG individuals constitute a synthetic population, and the game can be seen as a form of interactive executable model for studying disease spread, albeit of a very special kind. To a game developer maintaining an MMOG, recognizing, monitoring, and ultimately controlling a virtual plague is important, regardless of how it was initiated. The prospect of using tools, methods and theory from the field of epidemiology to do this seems natural and appealing. We will address the feasibility of such a prospect, first by considering some basic measures used in epidemiology, then by pointing out the differences between real world epidemics and virtual plagues. We also suggest directions for MMOG developer control through epidemiological modeling. Our aim is understanding the properties of virtual plagues, rather than trying to eliminate them or mitigate their effects, as would be in the case of real infectious disease.<|reference_end|> | arxiv | @article{boman2007modeling,
title={Modeling Epidemic Spread in Synthetic Populations - Virtual Plagues in
Massively Multiplayer Online Games},
author={Magnus Boman and Stefan J. Johansson},
journal={arXiv preprint arXiv:0705.4584},
year={2007},
archivePrefix={arXiv},
eprint={0705.4584},
primaryClass={cs.CY cs.AI cs.MA}
} | boman2007modeling |
arxiv-460 | 0705.4604 | Temporal Runtime Verification using Monadic Difference Logic | <|reference_start|>Temporal Runtime Verification using Monadic Difference Logic: In this paper we present an algorithm for performing runtime verification of a bounded temporal logic over timed runs. The algorithm consists of three elements. First, the bounded temporal formula to be verified is translated into a monadic first-order logic over difference inequalities, which we call monadic difference logic. Second, at each step of the timed run, the monadic difference formula is modified by computing a quotient with the state and time of that step. Third, the resulting formula is checked for being a tautology or being unsatisfiable by a decision procedure for monadic difference logic. We further provide a simple decision procedure for monadic difference logic based on the data structure Difference Decision Diagrams. The algorithm is complete in a very strong sense on a subclass of temporal formulae characterized as homogeneously monadic and it is approximate on other formulae. The approximation comes from the fact that not all unsatisfiable or tautological formulae are recognised at the earliest possible time of the runtime verification. Contrary to existing approaches, the presented algorithms do not work by syntactic rewriting but employ efficient decision structures which make them applicable in real applications within for instance business software.<|reference_end|> | arxiv | @article{andersen2007temporal,
title={Temporal Runtime Verification using Monadic Difference Logic},
author={Henrik Reif Andersen and Kaare J. Kristoffersen},
journal={arXiv preprint arXiv:0705.4604},
year={2007},
archivePrefix={arXiv},
eprint={0705.4604},
primaryClass={cs.LO}
} | andersen2007temporal |
arxiv-461 | 0705.4606 | Dynamic User-Defined Similarity Searching in Semi-Structured Text Retrieval | <|reference_start|>Dynamic User-Defined Similarity Searching in Semi-Structured Text Retrieval: Modern text retrieval systems often provide a similarity search utility, that allows the user to find efficiently a fixed number k of documents in the data set that are most similar to a given query (here a query is either a simple sequence of keywords or the identifier of a full document found in previous searches that is considered of interest). We consider the case of a textual database made of semi-structured documents. Each field, in turns, is modelled with a specific vector space. The problem is more complex when we also allow each such vector space to have an associated user-defined dynamic weight that influences its contribution to the overall dynamic aggregated and weighted similarity. This dynamic problem has been tackled in a recent paper by Singitham et al. in in VLDB 2004. Their proposed solution, which we take as baseline, is a variant of the cluster-pruning technique that has the potential for scaling to very large corpora of documents, and is far more efficient than the naive exhaustive search. We devise an alternative way of embedding weights in the data structure, coupled with a non-trivial application of a clustering algorithm based on the furthest point first heuristic for the metric k-center problem. The validity of our approach is demonstrated experimentally by showing significant performance improvements over the scheme proposed in Singitham et al. in VLDB 2004. We improve significantly tradeoffs between query time and output quality with respect to the baseline method in Singitham et al. in in VLDB 2004, and also with respect to a novel method by Chierichetti et al. to appear in ACM PODS 2007. We also speed up the pre-processing time by a factor at least thirty.<|reference_end|> | arxiv | @article{geraci2007dynamic,
title={Dynamic User-Defined Similarity Searching in Semi-Structured Text
Retrieval},
author={Filippo Geraci and Marco Pellegrini},
journal={arXiv preprint arXiv:0705.4606},
year={2007},
number={IIT TR-07/2007},
archivePrefix={arXiv},
eprint={0705.4606},
primaryClass={cs.IR cs.DS}
} | geraci2007dynamic |
arxiv-462 | 0705.4618 | An Improved Tight Closure Algorithm for Integer Octagonal Constraints | <|reference_start|>An Improved Tight Closure Algorithm for Integer Octagonal Constraints: Integer octagonal constraints (a.k.a. ``Unit Two Variables Per Inequality'' or ``UTVPI integer constraints'') constitute an interesting class of constraints for the representation and solution of integer problems in the fields of constraint programming and formal analysis and verification of software and hardware systems, since they couple algorithms having polynomial complexity with a relatively good expressive power. The main algorithms required for the manipulation of such constraints are the satisfiability check and the computation of the inferential closure of a set of constraints. The latter is called `tight' closure to mark the difference with the (incomplete) closure algorithm that does not exploit the integrality of the variables. In this paper we present and fully justify an O(n^3) algorithm to compute the tight closure of a set of UTVPI integer constraints.<|reference_end|> | arxiv | @article{bagnara2007an,
title={An Improved Tight Closure Algorithm for Integer Octagonal Constraints},
author={Roberto Bagnara, Patricia M. Hill, Enea Zaffanella},
journal={arXiv preprint arXiv:0705.4618},
year={2007},
archivePrefix={arXiv},
eprint={0705.4618},
primaryClass={cs.DS cs.CG cs.LO}
} | bagnara2007an |
arxiv-463 | 0705.4654 | Local Area Damage Detection in Composite Structures Using Piezoelectric Transducers | <|reference_start|>Local Area Damage Detection in Composite Structures Using Piezoelectric Transducers: An integrated and automated smart structures approach for structural health monitoring is presented, utilizing an array of piezoelectric transducers attached to or embedded within the structure for both actuation and sensing. The system actively interrogates the structure via broadband excitation of multiple actuators across a desired frequency range. The structure's vibration signature is then characterized by computing the transfer functions between each actuator/sensor pair, and compared to the baseline signature. Experimental results applying the system to local area damage detection in a MD Explorer rotorcraft composite flexbeam are presented.<|reference_end|> | arxiv | @article{lichtenwalner2007local,
title={Local Area Damage Detection in Composite Structures Using Piezoelectric
Transducers},
author={Peter F. Lichtenwalner and Donald A. Sofge},
journal={P.F. Lichtenwalner and D. Sofge, "Local Area Damage Detection in
Composite Structures Using Piezoelectric Transducers," In Proc. SPIE Sym. on
Smart Structures and Materials, Vol. 3326, SPIE, pp. 509-515, 1998},
year={2007},
doi={10.1117/12.310667},
archivePrefix={arXiv},
eprint={0705.4654},
primaryClass={cs.SD cs.CV}
} | lichtenwalner2007local |
arxiv-464 | 0705.4658 | Two sources are better than one for increasing the Kolmogorov complexity of infinite sequences | <|reference_start|>Two sources are better than one for increasing the Kolmogorov complexity of infinite sequences: The randomness rate of an infinite binary sequence is characterized by the sequence of ratios between the Kolmogorov complexity and the length of the initial segments of the sequence. It is known that there is no uniform effective procedure that transforms one input sequence into another sequence with higher randomness rate. By contrast, we display such a uniform effective procedure having as input two independent sequences with positive but arbitrarily small constant randomness rate. Moreover the transformation is a truth-table reduction and the output has randomness rate arbitrarily close to 1.<|reference_end|> | arxiv | @article{zimand2007two,
title={Two sources are better than one for increasing the Kolmogorov complexity
of infinite sequences},
author={Marius Zimand},
journal={arXiv preprint arXiv:0705.4658},
year={2007},
archivePrefix={arXiv},
eprint={0705.4658},
primaryClass={cs.IT cs.CC math.IT}
} | zimand2007two |
arxiv-465 | 0705.4673 | A randomized algorithm for the on-line weighted bipartite matching problem | <|reference_start|>A randomized algorithm for the on-line weighted bipartite matching problem: We study the on-line minimum weighted bipartite matching problem in arbitrary metric spaces. Here, $n$ not necessary disjoint points of a metric space $M$ are given, and are to be matched on-line with $n$ points of $M$ revealed one by one. The cost of a matching is the sum of the distances of the matched points, and the goal is to find or approximate its minimum. The competitive ratio of the deterministic problem is known to be $\Theta(n)$. It was conjectured that a randomized algorithm may perform better against an oblivious adversary, namely with an expected competitive ratio $\Theta(\log n)$. We prove a slightly weaker result by showing a $o(\log^3 n)$ upper bound on the expected competitive ratio. As an application the same upper bound holds for the notoriously hard fire station problem, where $M$ is the real line.<|reference_end|> | arxiv | @article{csaba2007a,
title={A randomized algorithm for the on-line weighted bipartite matching
problem},
author={B'ela Csaba (Anal. and Stoch. Res. Group, HAS), Andr'as S. Pluh'ar
(Dept. of Comp. Sci., Univ. of Szeged)},
journal={arXiv preprint arXiv:0705.4673},
year={2007},
archivePrefix={arXiv},
eprint={0705.4673},
primaryClass={cs.DS cs.DM}
} | csaba2007a |
arxiv-466 | 0705.4676 | Recursive n-gram hashing is pairwise independent, at best | <|reference_start|>Recursive n-gram hashing is pairwise independent, at best: Many applications use sequences of n consecutive symbols (n-grams). Hashing these n-grams can be a performance bottleneck. For more speed, recursive hash families compute hash values by updating previous values. We prove that recursive hash families cannot be more than pairwise independent. While hashing by irreducible polynomials is pairwise independent, our implementations either run in time O(n) or use an exponential amount of memory. As a more scalable alternative, we make hashing by cyclic polynomials pairwise independent by ignoring n-1 bits. Experimentally, we show that hashing by cyclic polynomials is is twice as fast as hashing by irreducible polynomials. We also show that randomized Karp-Rabin hash families are not pairwise independent.<|reference_end|> | arxiv | @article{lemire2007recursive,
title={Recursive n-gram hashing is pairwise independent, at best},
author={Daniel Lemire and Owen Kaser},
journal={Computer Speech & Language 24(4): 698-710 (2010)},
year={2007},
doi={10.1016/j.csl.2009.12.001},
archivePrefix={arXiv},
eprint={0705.4676},
primaryClass={cs.DB cs.CL}
} | lemire2007recursive |
arxiv-467 | 0706.0014 | Towards an exact adaptive algorithm for the determinant of a rational matrix | <|reference_start|>Towards an exact adaptive algorithm for the determinant of a rational matrix: In this paper we propose several strategies for the exact computation of the determinant of a rational matrix. First, we use the Chinese Remaindering Theorem and the rational reconstruction to recover the rational determinant from its modular images. Then we show a preconditioning for the determinant which allows us to skip the rational reconstruction process and reconstruct an integer result. We compare those approaches with matrix preconditioning which allow us to treat integer instead of rational matrices. This allows us to introduce integer determinant algorithms to the rational determinant problem. In particular, we discuss the applicability of the adaptive determinant algorithm of [9] and compare it with the integer Chinese Remaindering scheme. We present an analysis of the complexity of the strategies and evaluate their experimental performance on numerous examples. This experience allows us to develop an adaptive strategy which would choose the best solution at the run time, depending on matrix properties. All strategies have been implemented in LinBox linear algebra library.<|reference_end|> | arxiv | @article{urbanska2007towards,
title={Towards an exact adaptive algorithm for the determinant of a rational
matrix},
author={Anna Urbanska (LJK)},
journal={arXiv preprint arXiv:0706.0014},
year={2007},
archivePrefix={arXiv},
eprint={0706.0014},
primaryClass={cs.SC}
} | urbanska2007towards |
arxiv-468 | 0706.0022 | Modeling Computations in a Semantic Network | <|reference_start|>Modeling Computations in a Semantic Network: Semantic network research has seen a resurgence from its early history in the cognitive sciences with the inception of the Semantic Web initiative. The Semantic Web effort has brought forth an array of technologies that support the encoding, storage, and querying of the semantic network data structure at the world stage. Currently, the popular conception of the Semantic Web is that of a data modeling medium where real and conceptual entities are related in semantically meaningful ways. However, new models have emerged that explicitly encode procedural information within the semantic network substrate. With these new technologies, the Semantic Web has evolved from a data modeling medium to a computational medium. This article provides a classification of existing computational modeling efforts and the requirements of supporting technologies that will aid in the further growth of this burgeoning domain.<|reference_end|> | arxiv | @article{rodriguez2007modeling,
title={Modeling Computations in a Semantic Network},
author={Marko A. Rodriguez and Johan Bollen},
journal={arXiv preprint arXiv:0706.0022},
year={2007},
archivePrefix={arXiv},
eprint={0706.0022},
primaryClass={cs.AI}
} | rodriguez2007modeling |
arxiv-469 | 0706.0046 | Symmetry Partition Sort | <|reference_start|>Symmetry Partition Sort: In this paper, we propose a useful replacement for quicksort-style utility functions. The replacement is called Symmetry Partition Sort, which has essentially the same principle as Proportion Extend Sort. The maximal difference between them is that the new algorithm always places already partially sorted inputs (used as a basis for the proportional extension) on both ends when entering the partition routine. This is advantageous to speeding up the partition routine. The library function based on the new algorithm is more attractive than Psort which is a library function introduced in 2004. Its implementation mechanism is simple. The source code is clearer. The speed is faster, with O(n log n) performance guarantee. Both the robustness and adaptivity are better. As a library function, it is competitive.<|reference_end|> | arxiv | @article{chen2007symmetry,
title={Symmetry Partition Sort},
author={Jing-Chao Chen},
journal={arXiv preprint arXiv:0706.0046},
year={2007},
archivePrefix={arXiv},
eprint={0706.0046},
primaryClass={cs.DS}
} | chen2007symmetry |
arxiv-470 | 0706.0103 | Many concepts and two logics of algorithmic reduction | <|reference_start|>Many concepts and two logics of algorithmic reduction: Within the program of finding axiomatizations for various parts of computability logic, it was proved earlier that the logic of interactive Turing reduction is exactly the implicative fragment of Heyting's intuitionistic calculus. That sort of reduction permits unlimited reusage of the computational resource represented by the antecedent. An at least equally basic and natural sort of algorithmic reduction, however, is the one that does not allow such reusage. The present article shows that turning the logic of the first sort of reduction into the logic of the second sort of reduction takes nothing more than just deleting the contraction rule from its Gentzen-style axiomatization. The first (Turing) sort of interactive reduction is also shown to come in three natural versions. While those three versions are very different from each other, their logical behaviors (in isolation) turn out to be indistinguishable, with that common behavior being precisely captured by implicative intuitionistic logic. Among the other contributions of the present article is an informal introduction of a series of new -- finite and bounded -- versions of recurrence operations and the associated reduction operations. An online source on computability logic can be found at http://www.cis.upenn.edu/~giorgi/cl.html<|reference_end|> | arxiv | @article{japaridze2007many,
title={Many concepts and two logics of algorithmic reduction},
author={Giorgi Japaridze},
journal={Studia Logica 91 (2009), pp. 1-24},
year={2007},
doi={10.1007/s11225-009-9164-7},
archivePrefix={arXiv},
eprint={0706.0103},
primaryClass={cs.LO math.LO}
} | japaridze2007many |
arxiv-471 | 0706.0225 | On the End-to-End Distortion for a Buffered Transmission over Fading Channel | <|reference_start|>On the End-to-End Distortion for a Buffered Transmission over Fading Channel: In this paper, we study the end-to-end distortion/delay tradeoff for a analogue source transmitted over a fading channel. The analogue source is quantized and stored in a buffer until it is transmitted. There are two extreme cases as far as buffer delay is concerned: no delay and infinite delay. We observe that there is a significant power gain by introducing a buffer delay. Our goal is to investigate the situation between these two extremes. Using recently proposed \emph{effective capacity} concept, we derive a closed-form formula for this tradeoff. For SISO case, an asymptotically tight upper bound for our distortion-delay curve is derived, which approaches to the infinite delay lower bound as $\mathcal{D}_\infty \exp(\frac{C}{\tau_n})$, with $\tau_n$ is the normalized delay, $C$ is a constant. For more general MIMO channel, we computed the distortion SNR exponent -- the exponential decay rate of the expected distortion in the high SNR regime. Numerical results demonstrate that introduction of a small amount delay can save significant transmission power.<|reference_end|> | arxiv | @article{li2007on,
title={On the End-to-End Distortion for a Buffered Transmission over Fading
Channel},
author={Qiang Li and C. N. Georghiades},
journal={arXiv preprint arXiv:0706.0225},
year={2007},
archivePrefix={arXiv},
eprint={0706.0225},
primaryClass={cs.IT math.IT}
} | li2007on |
arxiv-472 | 0706.0252 | Applying the Z-transform for the static analysis of floating-point numerical filters | <|reference_start|>Applying the Z-transform for the static analysis of floating-point numerical filters: Digital linear filters are used in a variety of applications (sound treatment, control/command, etc.), implemented in software, in hardware, or a combination thereof. For safety-critical applications, it is necessary to bound all variables and outputs of all filters. We give a compositional, effective abstraction for digital linear filters expressed as block diagrams, yielding sound, precise bounds for fixed-point or floating-point implementations of the filters.<|reference_end|> | arxiv | @article{monniaux2007applying,
title={Applying the Z-transform for the static analysis of floating-point
numerical filters},
author={David Monniaux (LIENS)},
journal={arXiv preprint arXiv:0706.0252},
year={2007},
archivePrefix={arXiv},
eprint={0706.0252},
primaryClass={cs.PL cs.NA}
} | monniaux2007applying |
arxiv-473 | 0706.0280 | Multi-Agent Modeling Using Intelligent Agents in the Game of Lerpa | <|reference_start|>Multi-Agent Modeling Using Intelligent Agents in the Game of Lerpa: Game theory has many limitations implicit in its application. By utilizing multiagent modeling, it is possible to solve a number of problems that are unsolvable using traditional game theory. In this paper reinforcement learning is applied to neural networks to create intelligent agents<|reference_end|> | arxiv | @article{hurwitz2007multi-agent,
title={Multi-Agent Modeling Using Intelligent Agents in the Game of Lerpa},
author={Evan Hurwitz and Tshilidzi Marwala},
journal={arXiv preprint arXiv:0706.0280},
year={2007},
archivePrefix={arXiv},
eprint={0706.0280},
primaryClass={cs.MA cs.GT}
} | hurwitz2007multi-agent |
arxiv-474 | 0706.0300 | Automatic Detection of Pulmonary Embolism using Computational Intelligence | <|reference_start|>Automatic Detection of Pulmonary Embolism using Computational Intelligence: This article describes the implementation of a system designed to automatically detect the presence of pulmonary embolism in lung scans. These images are firstly segmented, before alignment and feature extraction using PCA. The neural network was trained using the Hybrid Monte Carlo method, resulting in a committee of 250 neural networks and good results are obtained.<|reference_end|> | arxiv | @article{scurrell2007automatic,
title={Automatic Detection of Pulmonary Embolism using Computational
Intelligence},
author={Simon Scurrell, Tshilidzi Marwala and David Rubin},
journal={arXiv preprint arXiv:0706.0300},
year={2007},
archivePrefix={arXiv},
eprint={0706.0300},
primaryClass={cs.CV}
} | scurrell2007automatic |
arxiv-475 | 0706.0306 | Submission of content to a digital object repository using a configurable workflow system | <|reference_start|>Submission of content to a digital object repository using a configurable workflow system: The prototype of a workflow system for the submission of content to a digital object repository is here presented. It is based entirely on open-source standard components and features a service-oriented architecture. The front-end consists of Java Business Process Management (jBPM), Java Server Faces (JSF), and Java Server Pages (JSP). A Fedora Repository and a mySQL data base management system serve as a back-end. The communication between front-end and back-end uses a SOAP minimal binding stub. We describe the design principles and the construction of the prototype and discuss the possibilities and limitations of workflow creation by administrators. The code of the prototype is open-source and can be retrieved in the project escipub at http://sourceforge.net<|reference_end|> | arxiv | @article{hense2007submission,
title={Submission of content to a digital object repository using a
configurable workflow system},
author={Andreas Hense, Johannes Mueller},
journal={arXiv preprint arXiv:0706.0306},
year={2007},
archivePrefix={arXiv},
eprint={0706.0306},
primaryClass={cs.DL}
} | hense2007submission |
arxiv-476 | 0706.0323 | Multiplication of free random variables and the S-transform: the case of vanishing mean | <|reference_start|>Multiplication of free random variables and the S-transform: the case of vanishing mean: This note extends Voiculescu's S-transform based analytical machinery for free multiplicative convolution to the case where the mean of the probability measures vanishes. We show that with the right interpretation of the S-transform in the case of vanishing mean, the usual formula makes perfectly good sense.<|reference_end|> | arxiv | @article{rao2007multiplication,
title={Multiplication of free random variables and the S-transform: the case of
vanishing mean},
author={N. Raj Rao and Roland Speicher},
journal={arXiv preprint arXiv:0706.0323},
year={2007},
archivePrefix={arXiv},
eprint={0706.0323},
primaryClass={math.OA cs.IT math.IT math.PR}
} | rao2007multiplication |
arxiv-477 | 0706.0427 | Watermark Embedding and Detection | <|reference_start|>Watermark Embedding and Detection: The embedder and the detector (or decoder) are the two most important components of the digital watermarking systems. Thus in this work, we discuss how to design a better embedder and detector (or decoder). I first give a summary of the prospective applications of watermarking technology and major watermarking schemes in the literature. My review on the literature closely centers upon how the side information is exploited at both embedders and detectors. In Chapter 3, I explore the optimum detector or decoder according to a particular probability distribution of the host signals. We found that the performance of both multiplicative and additive spread spectrum schemes depends on the shape parameter of the host signals. For spread spectrum schemes, the performance of the detector or the decoder is reduced by the host interference. Thus I present a new host-interference rejection technique for the multiplicative spread spectrum schemes. Its embedding rule is tailored to the optimum detection or decoding rule. Though the host interference rejection schemes enjoy a big performance gain over the traditional spread spectrum schemes, their drawbacks that it is difficult for them to be implemented with the perceptual analysis to achieve the maximum allowable embedding level discourage their use in real scenarios. Thus, in the last chapters of this work, I introduce a double-sided technique to tackle this drawback. It differs from the host interference rejection schemes in that it utilizes but does not reject the host interference at its embedder. The perceptual analysis can be easily implemented in our scheme to achieve the maximum allowable level of embedding strength.<|reference_end|> | arxiv | @article{zhong2007watermark,
title={Watermark Embedding and Detection},
author={Jidong Zhong},
journal={arXiv preprint arXiv:0706.0427},
year={2007},
archivePrefix={arXiv},
eprint={0706.0427},
primaryClass={cs.MM cs.CR}
} | zhong2007watermark |
arxiv-478 | 0706.0430 | Anonymity in the Wild: Mixes on unstructured networks | <|reference_start|>Anonymity in the Wild: Mixes on unstructured networks: As decentralized computing scenarios get ever more popular, unstructured topologies are natural candidates to consider running mix networks upon. We consider mix network topologies where mixes are placed on the nodes of an unstructured network, such as social networks and scale-free random networks. We explore the efficiency and traffic analysis resistance properties of mix networks based on unstructured topologies as opposed to theoretically optimal structured topologies, under high latency conditions. We consider a mix of directed and undirected network models, as well as one real world case study -- the LiveJournal friendship network topology. Our analysis indicates that mix-networks based on scale-free and small-world topologies have, firstly, mix-route lengths that are roughly comparable to those in expander graphs; second, that compromise of the most central nodes has little effect on anonymization properties, and third, batch sizes required for warding off intersection attacks need to be an order of magnitude higher in unstructured networks in comparison with expander graph topologies.<|reference_end|> | arxiv | @article{nagaraja2007anonymity,
title={Anonymity in the Wild: Mixes on unstructured networks},
author={Shishir Nagaraja},
journal={arXiv preprint arXiv:0706.0430},
year={2007},
archivePrefix={arXiv},
eprint={0706.0430},
primaryClass={cs.CR cs.NI}
} | nagaraja2007anonymity |
arxiv-479 | 0706.0431 | Abstract numeration systems on bounded languages and multiplication by a constant | <|reference_start|>Abstract numeration systems on bounded languages and multiplication by a constant: A set of integers is $S$-recognizable in an abstract numeration system $S$ if the language made up of the representations of its elements is accepted by a finite automaton. For abstract numeration systems built over bounded languages with at least three letters, we show that multiplication by an integer $\lambda\ge2$ does not preserve $S$-recognizability, meaning that there always exists a $S$-recognizable set $X$ such that $\lambda X$ is not $S$-recognizable. The main tool is a bijection between the representation of an integer over a bounded language and its decomposition as a sum of binomial coefficients with certain properties, the so-called combinatorial numeration system.<|reference_end|> | arxiv | @article{charlier2007abstract,
title={Abstract numeration systems on bounded languages and multiplication by a
constant},
author={Emilie Charlier, Michel Rigo, Wolfgang Steiner (LIAFA)},
journal={Integers: Electronic Journal of Combinatorial Number Theory 8, 1
(2008) #35},
year={2007},
archivePrefix={arXiv},
eprint={0706.0431},
primaryClass={cs.DM math.CO}
} | charlier2007abstract |
arxiv-480 | 0706.0447 | Non lin\'earit\'e des fonctions bool\'eennes donn\'ees par des traces de polyn\^omes de degr\'e binaire 3 | <|reference_start|>Non lin\'earit\'e des fonctions bool\'eennes donn\'ees par des traces de polyn\^omes de degr\'e binaire 3: Nous \'etudions la non lin\'earit\'e des fonctions d\'efinies sur F_{2^m} o\`u $m$ est un entier impair, associ\'ees aux polyn\^omes de degr\'e 7 ou \`a des polyn\^omes plus g\'en\'eraux. ----- We study the nonlinearity of the functions defined on F_{2^m} where $m$ is an odd integer, associated to the polynomials of degree 7 or more general polynomials.<|reference_end|> | arxiv | @article{rodier2007non,
title={Non lin\'earit\'e des fonctions bool\'eennes donn\'ees par des traces de
polyn\^omes de degr\'e binaire 3},
author={Fran\c{c}ois Rodier (IML), Eric F\'erard (GAATI)},
journal={arXiv preprint arXiv:0706.0447},
year={2007},
archivePrefix={arXiv},
eprint={0706.0447},
primaryClass={math.NT cs.CR cs.DM math.AG}
} | rodier2007non |
arxiv-481 | 0706.0457 | Challenges and Opportunities of Evolutionary Robotics | <|reference_start|>Challenges and Opportunities of Evolutionary Robotics: Robotic hardware designs are becoming more complex as the variety and number of on-board sensors increase and as greater computational power is provided in ever-smaller packages on-board robots. These advances in hardware, however, do not automatically translate into better software for controlling complex robots. Evolutionary techniques hold the potential to solve many difficult problems in robotics which defy simple conventional approaches, but present many challenges as well. Numerous disciplines including artificial life, cognitive science and neural networks, rule-based systems, behavior-based control, genetic algorithms and other forms of evolutionary computation have contributed to shaping the current state of evolutionary robotics. This paper provides an overview of developments in the emerging field of evolutionary robotics, and discusses some of the opportunities and challenges which currently face practitioners in the field.<|reference_end|> | arxiv | @article{sofge2007challenges,
title={Challenges and Opportunities of Evolutionary Robotics},
author={D. A. Sofge, M. A. Potter, M. D. Bugajska, A. C. Schultz},
journal={D.A. Sofge, M.A. Potter, M.D. Bugajska, and A.C. Schultz,
"Challenges and Opportunities of Evolutionary Robotics." In Proc. 2nd Int'l
Conf. on Computational Intelligence, Robotics, and Autonomous Systems, 2003},
year={2007},
archivePrefix={arXiv},
eprint={0706.0457},
primaryClass={cs.NE cs.RO}
} | sofge2007challenges |
arxiv-482 | 0706.0465 | Virtual Sensor Based Fault Detection and Classification on a Plasma Etch Reactor | <|reference_start|>Virtual Sensor Based Fault Detection and Classification on a Plasma Etch Reactor: The SEMATECH sponsored J-88-E project teaming Texas Instruments with NeuroDyne (et al.) focused on Fault Detection and Classification (FDC) on a Lam 9600 aluminum plasma etch reactor, used in the process of semiconductor fabrication. Fault classification was accomplished by implementing a series of virtual sensor models which used data from real sensors (Lam Station sensors, Optical Emission Spectroscopy, and RF Monitoring) to predict recipe setpoints and wafer state characteristics. Fault detection and classification were performed by comparing predicted recipe and wafer state values with expected values. Models utilized include linear PLS, Polynomial PLS, and Neural Network PLS. Prediction of recipe setpoints based upon sensor data provides a capability for cross-checking that the machine is maintaining the desired setpoints. Wafer state characteristics such as Line Width Reduction and Remaining Oxide were estimated on-line using these same process sensors (Lam, OES, RFM). Wafer-to-wafer measurement of these characteristics in a production setting (where typically this information may be only sparsely available, if at all, after batch processing runs with numerous wafers have been completed) would provide important information to the operator that the process is or is not producing wafers within acceptable bounds of product quality. Production yield is increased, and correspondingly per unit cost is reduced, by providing the operator with the opportunity to adjust the process or machine before etching more wafers.<|reference_end|> | arxiv | @article{sofge2007virtual,
title={Virtual Sensor Based Fault Detection and Classification on a Plasma Etch
Reactor},
author={D. A. Sofge},
journal={D. Sofge, "Virtual Sensor Based Fault Detection and Classification
on a Plasma Etch Reactor," The 2nd Joint Mexico-US Int'l. Workshop on Neural
Networks and Neurocontrol (poster), 1997},
year={2007},
archivePrefix={arXiv},
eprint={0706.0465},
primaryClass={cs.AI cs.CV}
} | sofge2007virtual |
arxiv-483 | 0706.0484 | Motivation, Design, and Ubiquity: A Discussion of Research Ethics and Computer Science | <|reference_start|>Motivation, Design, and Ubiquity: A Discussion of Research Ethics and Computer Science: Modern society is permeated with computers, and the software that controls them can have latent, long-term, and immediate effects that reach far beyond the actual users of these systems. This places researchers in Computer Science and Software Engineering in a critical position of influence and responsibility, more than any other field because computer systems are vital research tools for other disciplines. This essay presents several key ethical concerns and responsibilities relating to research in computing. The goal is to promote awareness and discussion of ethical issues among computer science researchers. A hypothetical case study is provided, along with questions for reflection and discussion.<|reference_end|> | arxiv | @article{wright2007motivation,
title={Motivation, Design, and Ubiquity: A Discussion of Research Ethics and
Computer Science},
author={David R. Wright},
journal={arXiv preprint arXiv:0706.0484},
year={2007},
archivePrefix={arXiv},
eprint={0706.0484},
primaryClass={cs.GL}
} | wright2007motivation
arxiv-484 | 0706.0489 | Sampling Colourings of the Triangular Lattice | <|reference_start|>Sampling Colourings of the Triangular Lattice: We show that the Glauber dynamics on proper 9-colourings of the triangular lattice is rapidly mixing, which allows for efficient sampling. Consequently, there is a fully polynomial randomised approximation scheme (FPRAS) for counting proper 9-colourings of the triangular lattice. Proper colourings correspond to configurations in the zero-temperature anti-ferromagnetic Potts model. We show that the spin system consisting of proper 9-colourings of the triangular lattice has strong spatial mixing. This implies that there is a unique infinite-volume Gibbs distribution, which is an important property studied in statistical physics. Our results build on previous work by Goldberg, Martin and Paterson, who showed similar results for 10 colours on the triangular lattice. Their work was preceded by Salas and Sokal's 11-colour result. Both proofs rely on computational assistance, and so does our 9-colour proof. We have used a randomised heuristic to guide us towards rigourous results.<|reference_end|> | arxiv | @article{jalsenius2007sampling,
title={Sampling Colourings of the Triangular Lattice},
author={Markus Jalsenius},
journal={arXiv preprint arXiv:0706.0489},
year={2007},
archivePrefix={arXiv},
eprint={0706.0489},
primaryClass={math-ph cs.DM cs.DS math.MP}
} | jalsenius2007sampling |
arxiv-485 | 0706.0502 | Relating two standard notions of secrecy | <|reference_start|>Relating two standard notions of secrecy: Two styles of definitions are usually considered to express that a security protocol preserves the confidentiality of a data s. Reachability-based secrecy means that s should never be disclosed while equivalence-based secrecy states that two executions of a protocol with distinct instances for s should be indistinguishable to an attacker. Although the second formulation ensures a higher level of security and is closer to cryptographic notions of secrecy, decidability results and automatic tools have mainly focused on the first definition so far. This paper initiates a systematic investigation of the situations where syntactic secrecy entails strong secrecy. We show that in the passive case, reachability-based secrecy actually implies equivalence-based secrecy for digital signatures, symmetric and asymmetric encryption provided that the primitives are probabilistic. For active adversaries, we provide sufficient (and rather tight) conditions on the protocol for this implication to hold.<|reference_end|> | arxiv | @article{cortier2007relating,
title={Relating two standard notions of secrecy},
author={Veronique Cortier, Michael Rusinovitch, Eugen Zalinescu},
journal={Logical Methods in Computer Science, Volume 3, Issue 3 (July 6,
2007) lmcs:1093},
year={2007},
doi={10.2168/LMCS-3(3:2)2007},
archivePrefix={arXiv},
eprint={0706.0502},
primaryClass={cs.CR cs.LO}
} | cortier2007relating |
arxiv-486 | 0706.0507 | A collaborative framework to exchange and share product information within a supply chain context | <|reference_start|>A collaborative framework to exchange and share product information within a supply chain context: The new requirement for "collaboration" between multidisciplinary collaborators induces to exchange and share adequate information on the product, processes throughout the products' lifecycle. Thus, effective capture of information, and also its extraction, recording, exchange, sharing, and reuse become increasingly critical. These lead companies to adopt new improved methodologies in managing the exchange and sharing of information. The aim of this paper is to describe a collaborative framework system to exchange and share information, which is based on: (i) The Product Process Collaboration Organization model (PPCO) which defines product and process information, and the various collaboration methods for the organizations involved in the supply chain. (ii) Viewpoint model describes relationships between each actor and the comprehensive Product/Process model, defining each actor's "domain of interest" within the evolving product definition. (iii) A layer which defines the comprehensive organization and collaboration relationships between the actors within the supply chain. (iv) Based on the above relationships, the last layer proposes a typology of exchanged messages. A communication method, based on XML, is developed that supports optimal exchange/sharing of information. To illustrate the proposed framework system, an example is presented related to collaborative design of a new piston for an automotive engine. The focus is on user-viewpoint integration to ensure that the adequate information is retrieved from the PPCO.<|reference_end|> | arxiv | @article{geryville2007a,
title={A collaborative framework to exchange and share product information
within a supply chain context},
author={Hichem Geryville (LIESP), Yacine Ouzrout (LIESP), Abdelaziz Bouras
(LIESP), Nikolaos Sapidis},
journal={arXiv preprint arXiv:0706.0507},
year={2007},
archivePrefix={arXiv},
eprint={0706.0507},
primaryClass={cs.HC}
} | geryville2007a |
arxiv-487 | 0706.0523 | Interpolant-Based Transition Relation Approximation | <|reference_start|>Interpolant-Based Transition Relation Approximation: In predicate abstraction, exact image computation is problematic, requiring in the worst case an exponential number of calls to a decision procedure. For this reason, software model checkers typically use a weak approximation of the image. This can result in a failure to prove a property, even given an adequate set of predicates. We present an interpolant-based method for strengthening the abstract transition relation in case of such failures. This approach guarantees convergence given an adequate set of predicates, without requiring an exact image computation. We show empirically that the method converges more rapidly than an earlier method based on counterexample analysis.<|reference_end|> | arxiv | @article{jhala2007interpolant-based,
title={Interpolant-Based Transition Relation Approximation},
author={Ranjit Jhala, Kenneth L. McMillan},
journal={Logical Methods in Computer Science, Volume 3, Issue 4 (November
1, 2007) lmcs:1152},
year={2007},
doi={10.2168/LMCS-3(4:1)2007},
archivePrefix={arXiv},
eprint={0706.0523},
primaryClass={cs.LO cs.PL cs.SE}
} | jhala2007interpolant-based |
arxiv-488 | 0706.0534 | Compressed Regression | <|reference_start|>Compressed Regression: Recent research has studied the role of sparsity in high dimensional regression and signal reconstruction, establishing theoretical limits for recovering sparse models from sparse data. This line of work shows that $\ell_1$-regularized least squares regression can accurately estimate a sparse linear model from $n$ noisy examples in $p$ dimensions, even if $p$ is much larger than $n$. In this paper we study a variant of this problem where the original $n$ input variables are compressed by a random linear transformation to $m \ll n$ examples in $p$ dimensions, and establish conditions under which a sparse linear model can be successfully recovered from the compressed data. A primary motivation for this compression procedure is to anonymize the data and preserve privacy by revealing little information about the original data. We characterize the number of random projections that are required for $\ell_1$-regularized compressed regression to identify the nonzero coefficients in the true model with probability approaching one, a property called ``sparsistence.'' In addition, we show that $\ell_1$-regularized compressed regression asymptotically predicts as well as an oracle linear model, a property called ``persistence.'' Finally, we characterize the privacy properties of the compression procedure in information-theoretic terms, establishing upper bounds on the mutual information between the compressed and uncompressed data that decay to zero.<|reference_end|> | arxiv | @article{zhou2007compressed,
title={Compressed Regression},
author={Shuheng Zhou, John Lafferty, Larry Wasserman},
journal={IEEE Transactions on Information Theory, Volume 55, No.2, pp
846--866, 2009},
year={2007},
archivePrefix={arXiv},
eprint={0706.0534},
primaryClass={stat.ML cs.IT math.IT}
} | zhou2007compressed |
arxiv-489 | 0706.0564 | Tropical Implicitization and Mixed Fiber Polytopes | <|reference_start|>Tropical Implicitization and Mixed Fiber Polytopes: The software TrIm offers implementations of tropical implicitization and tropical elimination, as developed by Tevelev and the authors. Given a polynomial map with generic coefficients, TrIm computes the tropical variety of the image. When the image is a hypersurface, the output is the Newton polytope of the defining polynomial. TrIm can thus be used to compute mixed fiber polytopes, including secondary polytopes.<|reference_end|> | arxiv | @article{sturmfels2007tropical,
title={Tropical Implicitization and Mixed Fiber Polytopes},
author={Bernd Sturmfels and Josephine Yu},
journal={Software for algebraic geometry, 111--131, IMA Vol. Math. Appl.,
148, Springer, New York, 2008},
year={2007},
archivePrefix={arXiv},
eprint={0706.0564},
primaryClass={cs.SC math.AG math.CO}
} | sturmfels2007tropical |
arxiv-490 | 0706.0580 | Efficient Batch Update of Unique Identifiers in a Distributed Hash Table for Resources in a Mobile Host | <|reference_start|>Efficient Batch Update of Unique Identifiers in a Distributed Hash Table for Resources in a Mobile Host: Resources in a distributed system can be identified using identifiers based on random numbers. When using a distributed hash table to resolve such identifiers to network locations, the straightforward approach is to store the network location directly in the hash table entry associated with an identifier. When a mobile host contains a large number of resources, this requires that all of the associated hash table entries must be updated when its network address changes. We propose an alternative approach where we store a host identifier in the entry associated with a resource identifier and the actual network address of the host in a separate host entry. This can drastically reduce the time required for updating the distributed hash table when a mobile host changes its network address. We also investigate under which circumstances our approach should or should not be used. We evaluate and confirm the usefulness of our approach with experiments run on top of OpenDHT.<|reference_end|> | arxiv | @article{chung2007efficient,
title={Efficient Batch Update of Unique Identifiers in a Distributed Hash Table
for Resources in a Mobile Host},
author={Yoo Chung},
journal={arXiv preprint arXiv:0706.0580},
year={2007},
doi={10.1109/ISPA.2010.73},
archivePrefix={arXiv},
eprint={0706.0580},
primaryClass={cs.NI}
} | chung2007efficient |
arxiv-491 | 0706.0585 | A Novel Model of Working Set Selection for SMO Decomposition Methods | <|reference_start|>A Novel Model of Working Set Selection for SMO Decomposition Methods: In the process of training Support Vector Machines (SVMs) by decomposition methods, working set selection is an important technique, and some exciting schemes were employed into this field. To improve working set selection, we propose a new model for working set selection in sequential minimal optimization (SMO) decomposition methods. In this model, it selects B as working set without reselection. Some properties are given by simple proof, and experiments demonstrate that the proposed method is in general faster than existing methods.<|reference_end|> | arxiv | @article{zhao2007a,
title={A Novel Model of Working Set Selection for SMO Decomposition Methods},
author={Zhendong Zhao and Lei Yuan and Yuxuan Wang and Forrest Sheng Bao and
Shunyi Zhang and Yanfei Sun},
journal={arXiv preprint arXiv:0706.0585},
year={2007},
doi={10.1109/ICTAI.2007.99},
archivePrefix={arXiv},
eprint={0706.0585},
primaryClass={cs.LG cs.AI}
} | zhao2007a |
arxiv-492 | 0706.0682 | Code spectrum and reliability function: Gaussian channel | <|reference_start|>Code spectrum and reliability function: Gaussian channel: A new approach for upper bounding the channel reliability function using the code spectrum is described. It allows to treat both low and high rate cases in a unified way. In particular, the earlier known upper bounds are improved, and a new derivation of the sphere-packing bound is presented.<|reference_end|> | arxiv | @article{burnashev2007code,
title={Code spectrum and reliability function: Gaussian channel},
author={Marat V. Burnashev},
journal={Problems of Information Transmission, vol. 43, no. 2, pp. 3-24,
2007},
year={2007},
archivePrefix={arXiv},
eprint={0706.0682},
primaryClass={cs.IT math.IT}
} | burnashev2007code |
arxiv-493 | 0706.0685 | Non-Parametric Field Estimation using Randomly Deployed, Noisy, Binary Sensors | <|reference_start|>Non-Parametric Field Estimation using Randomly Deployed, Noisy, Binary Sensors: The reconstruction of a deterministic data field from binary-quantized noisy observations of sensors which are randomly deployed over the field domain is studied. The study focuses on the extremes of lack of deterministic control in the sensor deployment, lack of knowledge of the noise distribution, and lack of sensing precision and reliability. Such adverse conditions are motivated by possible real-world scenarios where a large collection of low-cost, crudely manufactured sensors are mass-deployed in an environment where little can be assumed about the ambient noise. A simple estimator that reconstructs the entire data field from these unreliable, binary-quantized, noisy observations is proposed. Technical conditions for the almost sure and integrated mean squared error (MSE) convergence of the estimate to the data field, as the number of sensors tends to infinity, are derived and their implications are discussed. For finite-dimensional, bounded-variation, and Sobolev-differentiable function classes, specific integrated MSE decay rates are derived. For the first and third function classes these rates are found to be minimax order optimal with respect to infinite precision sensing and known noise distribution.<|reference_end|> | arxiv | @article{wang2007non-parametric,
title={Non-Parametric Field Estimation using Randomly Deployed, Noisy, Binary
Sensors},
author={Ye Wang and Prakash Ishwar},
journal={arXiv preprint arXiv:0706.0685},
year={2007},
archivePrefix={arXiv},
eprint={0706.0685},
primaryClass={cs.IT math.IT}
} | wang2007non-parametric |
arxiv-494 | 0706.0692 | Probabilistic Interval Temporal Logic and Duration Calculus with Infinite Intervals: Complete Proof Systems | <|reference_start|>Probabilistic Interval Temporal Logic and Duration Calculus with Infinite Intervals: Complete Proof Systems: The paper presents probabilistic extensions of interval temporal logic (ITL) and duration calculus (DC) with infinite intervals and complete Hilbert-style proof systems for them. The completeness results are a strong completeness theorem for the system of probabilistic ITL with respect to an abstract semantics and a relative completeness theorem for the system of probabilistic DC with respect to real-time semantics. The proposed systems subsume probabilistic real-time DC as known from the literature. A correspondence between the proposed systems and a system of probabilistic interval temporal logic with finite intervals and expanding modalities is established too.<|reference_end|> | arxiv | @article{guelev2007probabilistic,
title={Probabilistic Interval Temporal Logic and Duration Calculus with
Infinite Intervals: Complete Proof Systems},
author={Dimitar P. Guelev},
journal={Logical Methods in Computer Science, Volume 3, Issue 3 (July 19,
2007) lmcs:947},
year={2007},
doi={10.2168/LMCS-3(3:3)2007},
archivePrefix={arXiv},
eprint={0706.0692},
primaryClass={cs.LO}
} | guelev2007probabilistic |
arxiv-495 | 0706.0720 | Universal Quantile Estimation with Feedback in the Communication-Constrained Setting | <|reference_start|>Universal Quantile Estimation with Feedback in the Communication-Constrained Setting: We consider the following problem of decentralized statistical inference: given i.i.d. samples from an unknown distribution, estimate an arbitrary quantile subject to limits on the number of bits exchanged. We analyze a standard fusion-based architecture, in which each of $m$ sensors transmits a single bit to the fusion center, which in turn is permitted to send some number $k$ bits of feedback. Supposing that each of $\nodenum$ sensors receives $n$ observations, the optimal centralized protocol yields mean-squared error decaying as $\order(1/[n m])$. We develop and analyze the performance of various decentralized protocols in comparison to this centralized gold-standard. First, we describe a decentralized protocol based on $k = \log(\nodenum)$ bits of feedback that is strongly consistent, and achieves the same asymptotic MSE as the centralized optimum. Second, we describe and analyze a decentralized protocol based on only a single bit ($k=1$) of feedback. For step sizes independent of $m$, it achieves an asymptotic MSE of order $\order[1/(n \sqrt{m})]$, whereas for step sizes decaying as $1/\sqrt{m}$, it achieves the same $\order(1/[n m])$ decay in MSE as the centralized optimum. Our theoretical results are complemented by simulations, illustrating the tradeoffs between these different protocols.<|reference_end|> | arxiv | @article{rajagopal2007universal,
title={Universal Quantile Estimation with Feedback in the
Communication-Constrained Setting},
author={Ram Rajagopal and Martin J. Wainwright},
journal={arXiv preprint arXiv:0706.0720},
year={2007},
archivePrefix={arXiv},
eprint={0706.0720},
primaryClass={cs.IT math.IT}
} | rajagopal2007universal |
arxiv-496 | 0706.0869 | Position Coding | <|reference_start|>Position Coding: A position coding pattern is an array of symbols in which subarrays of a certain fixed size appear at most once. So, each subarray uniquely identifies a location in the larger array, which means there is a bijection of some sort from this set of subarrays to a set of coordinates. The key to Fly Pentop Computer paper and other examples of position codes is a method to read the subarray and then convert it to coordinates. Position coding makes use of ideas from discrete mathematics and number theory. In this paper, we will describe the underlying mathematics of two position codes, one being the Anoto code that is the basis of "Fly paper". Then, we will present two new codes, one which uses binary wavelets as part of the bijection.<|reference_end|> | arxiv | @article{aboufadel2007position,
title={Position Coding},
author={Edward Aboufadel and Timothy Armstrong and Elizabeth Smietana},
journal={arXiv preprint arXiv:0706.0869},
year={2007},
archivePrefix={arXiv},
eprint={0706.0869},
primaryClass={cs.IT math.CO math.IT}
} | aboufadel2007position |
arxiv-497 | 0706.0870 | Inferring the Composition of a Trader Population in a Financial Market | <|reference_start|>Inferring the Composition of a Trader Population in a Financial Market: We discuss a method for predicting financial movements and finding pockets of predictability in the price-series, which is built around inferring the heterogeneity of trading strategies in a multi-agent trader population. This work explores extensions to our previous framework (arXiv:physics/0506134). Here we allow for more intelligent agents possessing a richer strategy set, and we no longer constrain the estimate for the heterogeneity of the agents to a probability space. We also introduce a scheme which allows the incorporation of models with a wide variety of agent types, and discuss a mechanism for the removal of bias from relevant parameters.<|reference_end|> | arxiv | @article{gupta2007inferring,
title={Inferring the Composition of a Trader Population in a Financial Market},
author={Nachi Gupta, Raphael Hauser, and Neil F. Johnson},
journal={arXiv preprint arXiv:0706.0870},
year={2007},
doi={10.1007/978-88-470-0665-2_7},
archivePrefix={arXiv},
eprint={0706.0870},
primaryClass={cs.CE nlin.AO}
} | gupta2007inferring |
arxiv-498 | 0706.0903 | Families of traveling impulses and fronts in some models with cross-diffusion | <|reference_start|>Families of traveling impulses and fronts in some models with cross-diffusion: An analysis of traveling wave solutions of partial differential equation (PDE) systems with cross-diffusion is presented. The systems under study fall in a general class of the classical Keller-Segel models to describe chemotaxis. The analysis is conducted using the theory of the phase plane analysis of the corresponding wave systems without a priory restrictions on the boundary conditions of the initial PDE. Special attention is paid to families of traveling wave solutions. Conditions for existence of front-impulse, impulse-front, and front-front traveling wave solutions are formulated. In particular, the simplest mathematical model is presented that has an impulse-impulse solution; we also show that a non-isolated singular point in the ordinary differential equation (ODE) wave system implies existence of free-boundary fronts. The results can be used for construction and analysis of different mathematical models describing systems with chemotaxis.<|reference_end|> | arxiv | @article{berezovskaya2007families,
title={Families of traveling impulses and fronts in some models with
cross-diffusion},
author={Faina Berezovskaya, Artem Novozhilov, Georgy Karev},
journal={arXiv preprint arXiv:0706.0903},
year={2007},
archivePrefix={arXiv},
eprint={0706.0903},
primaryClass={cs.NA}
} | berezovskaya2007families |
arxiv-499 | 0706.1001 | Epistemic Analysis of Strategic Games with Arbitrary Strategy Sets | <|reference_start|>Epistemic Analysis of Strategic Games with Arbitrary Strategy Sets: We provide here an epistemic analysis of arbitrary strategic games based on the possibility correspondences. Such an analysis calls for the use of transfinite iterations of the corresponding operators. Our approach is based on Tarski's Fixpoint Theorem and applies both to the notions of rationalizability and the iterated elimination of strictly dominated strategies.<|reference_end|> | arxiv | @article{apt2007epistemic,
title={Epistemic Analysis of Strategic Games with Arbitrary Strategy Sets},
author={Krzysztof R. Apt},
journal={arXiv preprint arXiv:0706.1001},
year={2007},
archivePrefix={arXiv},
eprint={0706.1001},
primaryClass={cs.GT cs.AI}
} | apt2007epistemic |
arxiv-500 | 0706.1002 | Moving Vertices to Make Drawings Plane | <|reference_start|>Moving Vertices to Make Drawings Plane: A straight-line drawing $\delta$ of a planar graph $G$ need not be plane, but can be made so by moving some of the vertices. Let shift$(G,\delta)$ denote the minimum number of vertices that need to be moved to turn $\delta$ into a plane drawing of $G$. We show that shift$(G,\delta)$ is NP-hard to compute and to approximate, and we give explicit bounds on shift$(G,\delta)$ when $G$ is a tree or a general planar graph. Our hardness results extend to 1BendPointSetEmbeddability, a well-known graph-drawing problem.<|reference_end|> | arxiv | @article{goaoc2007moving,
title={Moving Vertices to Make Drawings Plane},
author={Xavier Goaoc, Jan Kratochvil, Yoshio Okamoto, Chan-Su Shin, Alexander
Wolff},
journal={arXiv preprint arXiv:0706.1002},
year={2007},
archivePrefix={arXiv},
eprint={0706.1002},
primaryClass={cs.CG cs.CC cs.DM}
} | goaoc2007moving |