leroy.bib

@inproceedings{Leroy-well-founded-recursion,
  author = {Xavier Leroy},
  title = {Well-founded recursion done right},
  booktitle = {{CoqPL} 2024: The Tenth International Workshop on {Coq} for Programming Languages},
  organization = {{ACM}},
  address = {London, United Kingdom},
  year = {2024},
  month = jan,
  urllocal = {https://xavierleroy.org/publi/wf-recursion.pdf},
  urlhal = {https://hal.science/hal-04356563},
  abstract = {Several Coq libraries and extensions support the definition of non-structural recursive functions using well-founded orderings for termination. After pointing out some drawbacks of these existing approaches, we advocate going back to the basics and defining recursive functions by explicit structural induction on a proof of accessibility of the principal recursive argument.}
}
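The entry above advocates recursion by structural induction on an accessibility proof. A minimal Coq sketch of that style, assuming only the standard library (function and lemma names are ours, not the paper's): division by repeated subtraction, recursing structurally on the Acc proof of the dividend.

  Require Import Arith Lia Wf_nat.

  (* a - b is smaller than a whenever 0 < b <= a. *)
  Lemma sub_decreases : forall a b, 0 < b -> b <= a -> a - b < a.
  Proof. intros; lia. Qed.

  (* Structural recursion on ACC: Acc_inv peels one constructor off
     the accessibility proof, so the guard checker accepts the call. *)
  Fixpoint div_sub (a b : nat) (POS : 0 < b) (ACC : Acc lt a)
                   {struct ACC} : nat :=
    match le_lt_dec b a with
    | left LE => S (div_sub (a - b) b POS (Acc_inv ACC (sub_decreases a b POS LE)))
    | right _ => 0
    end.

  (* At the top level, the accessibility proof comes from lt_wf. *)
  Definition div (a b : nat) (POS : 0 < b) : nat :=
    div_sub a b POS (lt_wf a).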
@article{Appel-Leroy-extensional,
  author = {Andrew W. Appel and Xavier Leroy},
  title = {Efficient Extensional Binary Tries},
  journal = {Journal of Automated Reasoning},
  year = 2023,
  volume = {67},
  pages = {article 8},
  xtopic = {compcert},
  doi = {10.1007/s10817-022-09655-x},
  urllocal = {https://xavierleroy.org/publi/extensional-binary-tries.pdf},
  urlpublisher = {https://rdcu.be/c3kEC},
  urlarxiv = {https://arxiv.org/abs/2110.05063},
  urlhal = {https://hal.science/hal-03372247},
  abstract = {Lookup tables (finite maps) are a ubiquitous data structure.  In pure functional languages they are best represented using trees instead of hash tables.  In pure functional languages within constructive logic, without a primitive integer type, they are well represented using binary tries instead of search trees.

In this work, we introduce canonical binary tries, an improved binary-trie data structure that enjoys a natural extensionality property, quite useful in proofs, and supports sparseness more efficiently.  We provide full proofs of correctness in Coq.

We provide microbenchmark measurements of canonical binary tries versus several other data structures for finite maps, in a variety of application contexts; as well as measurement of canonical versus original tries in a big, real system. The application context of data structures contained in theorem statements imposes unusual requirements for which canonical tries are particularly well suited.

Code is available at \url{https://github.com/xavierleroy/canonical-binary-tries/tree/v2}.}
}
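To see the extensionality problem the entry above solves, here is a sketch (ours) of the classic, non-canonical binary trie over positive keys, in the style of CompCert's original PTree library: Leaf and Node Leaf None Leaf are distinct trees that represent the same empty map, so equal contents do not imply equal trees. Canonical tries rule out such junk representations.

  Require Import PArith.

  Inductive tree (A : Type) : Type :=
    | Leaf : tree A
    | Node : tree A -> option A -> tree A -> tree A.

  Arguments Leaf {A}.
  Arguments Node {A}.

  (* Lookup follows the bits of the positive key: xO goes left,
     xI goes right, xH stops at the current node. *)
  Fixpoint get {A} (p : positive) (t : tree A) : option A :=
    match t with
    | Leaf => None
    | Node l o r =>
        match p with
        | xH => o
        | xO q => get q l
        | xI q => get q r
        end
    end.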
@article{Courant-Leroy-polygen,
  author = {Nathanaël Courant and Xavier Leroy},
  title = {Verified code generation for the polyhedral model},
  journal = {Proc. {ACM} Program. Lang.},
  volume = {5},
  number = {{POPL}},
  pages = {40:1--40:24},
  year = {2021},
  xtopic = {compverif},
  doi = {10.1145/3434321},
  urllocal = {https://xavierleroy.org/publi/polyhedral-codegen.pdf},
  urlpublisher = {https://doi.org/10.1145/3434321},
  abstract = {The polyhedral model is a high-level intermediate representation for loop nests that supports elegantly a great many loop optimizations.  In a compiler, after polyhedral loop optimizations have been performed, it is necessary and difficult to regenerate sequential or parallel loop nests before continuing compilation.  This paper reports on the formalization and proof of semantic preservation of such a code generator that produces sequential code from a polyhedral representation.  The formalization and proofs are mechanized using the Coq proof assistant.}
}
@misc{Leroy-Compcert-Coq,
  author = {Xavier Leroy},
  title = {The {CompCert} verified compiler, 
                 software and commented proof},
  month = nov,
  year = 2022,
  xtopic = {compcert},
  howpublished = {Available at \url{https://compcert.org/}}
}
@misc{Leroy-inaugural-lecture,
  author = {Xavier Leroy},
  title = {Software, between mind and matter},
  month = mar,
  year = {2020},
  pdf = {https://xavierleroy.org/publi/inaugural-lecture.pdf},
  epub = {https://xavierleroy.org/publi/inaugural-lecture.epub},
  mobi = {https://xavierleroy.org/publi/inaugural-lecture.mobi},
  hal = {hal-02392159},
  urlhal = {https://hal.science/hal-02392159},
  abstract = {This is an English translation of Xavier Leroy's inaugural lecture at Collège de France \cite{Leroy-lecon-inaugurale}.}
}
@book{Leroy-lecon-inaugurale,
  author = {Xavier Leroy},
  title = {Le logiciel, entre l'esprit et la matière},
  publisher = {OpenEdition Books},
  series = {Leçons inaugurales du {Collège} de {France}},
  volume = 284,
  address = {Paris},
  month = dec,
  year = {2019},
  hal = {hal-02405754},
  urlhal = {https://hal.science/hal-02405754},
  urlpublisher = {https://books.openedition.org/cdf/7671},
  urllocal = {https://xavierleroy.org/publi/lecon-inaugurale.pdf},
  abstract = {(See \cite{Leroy-inaugural-lecture} for an English translation.)

The same computer hardware can fulfill many different functions simply by changing the software it runs.  This extraordinary plasticity allowed computers to move out of the computing centers and spread everywhere, from everyday objects to the infrastructures of the city.  What fundamental concepts underlie this technical feat?  How can we master the incredible and often frightening complexity of software?  How can we avoid programming ``bugs'' and resist attacks?  How can we establish that a piece of software deserves our trust?  Mathematical logic offers elements of an answer to these questions, from which a scientifically rigorous approach to software can be built.}
}
@book{Leroy-lecon-inaugurale-Fayard,
  author = {Xavier Leroy},
  title = {Le logiciel, entre l'esprit et la matière},
  series = {Leçons inaugurales du {Collège} de {France}},
  volume = 284,
  publisher = {Fayard},
  address = {Paris},
  month = apr,
  year = {2019},
  hal = {hal-02405754},
  abstract = {(See \cite{Leroy-inaugural-lecture} for an English translation.)

The same computer hardware can fulfill many different functions simply by changing the software it runs.  This extraordinary plasticity allowed computers to move out of the computing centers and spread everywhere, from everyday objects to the infrastructures of the city.  What fundamental concepts underlie this technical feat?  How can we master the incredible and often frightening complexity of software?  How can we avoid programming ``bugs'' and resist attacks?  How can we establish that a piece of software deserves our trust?  Mathematical logic offers elements of an answer to these questions, from which a scientifically rigorous approach to software can be built.}
}
@inproceedings{CompCert-WCET-2018,
  title = {Embedded Program Annotations for {WCET} Analysis},
  author = {Schommer, Bernhard and Cullmann, Christoph and Gebhard, Gernot
            and Leroy, Xavier and Schmidt, Michael and Wegener, Simon},
  booktitle = {WCET 2018: 18th International Workshop on Worst-Case Execution Time Analysis},
  series = {OASIcs},
  volume = 63,
  publisher = {Dagstuhl Publishing},
  year = {2018},
  month = jul,
  hal = {hal-01848686},
  urlhal = {https://hal.science/hal-01848686},
  urlpublisher = {https://doi.org/10.4230/OASIcs.WCET.2018.8},
  doi = {10.4230/OASIcs.WCET.2018.8},
  xtopic = {compcert},
  xranking = {workshop},
  abstract = {We present __builtin_ais_annot(), a user-friendly, versatile way to transfer annotations (also known as flow facts) written on the source code level to the machine code level. To do so, we couple two tools often used during the development of safety-critical hard real-time systems, the formally verified C compiler CompCert and the static WCET analyzer aiT. CompCert stores the AIS annotations given via __builtin_ais_annot() in a special section of the ELF binary, which can later be extracted automatically by aiT.}
}
@inproceedings{CompCert-ERTS-2018,
  author = {Daniel Kästner and
            Ulrich Wünsche and
            Jörg Barrho and 
            Marc Schlickling and
            Bernhard Schommer and
            Michael Schmidt and
            Christian Ferdinand and
            Xavier Leroy and
            Sandrine Blazy},
  title = {{CompCert}: Practical experience on integrating and qualifying
           a formally verified optimizing compiler},
  booktitle = {ERTS 2018: Embedded Real Time Software and Systems},
  publisher = {SEE},
  year = 2018,
  month = jan,
  hal = {hal-01643290},
  urlhal = {https://hal.science/hal-01643290},
  urllocal = {https://xavierleroy.org/publi/erts2018_compcert.pdf},
  xtopic = {compcert},
  abstract = {CompCert is the first commercially available optimizing compiler that is formally verified, using machine-assisted mathematical proofs, to be exempt from mis-compilation. The executable code it produces is proved to behave exactly as specified by the semantics of the source C program. This article gives an overview of the use of CompCert to gain certification credits for a highly safety-critical industry application, certified according to IEC 60880. We will briefly introduce the target application, illustrate the process of changing the existing compiler infrastructure to CompCert, and discuss performance characteristics. The main part focuses on the tool qualification strategy, in particular on how to take advantage of the formal correctness proof in the certification process.}
}
@article{Leroy-blog-Binaire-2018,
  author = {Xavier Leroy},
  title = {À la recherche du logiciel parfait},
  journal = {Le Monde, blog Binaire},
  year = 2018,
  month = nov,
  urlpublisher = {https://www.lemonde.fr/blog/binaire/2018/11/12/a-la-recherche-du-logiciel-parfait/},
  popularscience = {yes}
}
@inproceedings{Bourke-BDLPR-2017,
  title = {A formally verified compiler for {Lustre}},
  author = {Timothy Bourke and
            Lélio Brun and
            Pierre-Évariste Dagand and
            Xavier Leroy and
            Marc Pouzet and
            Lionel Rieg},
  booktitle = {PLDI 2017: Programming Language Design and Implementation},
  publisher = {ACM},
  year = 2017,
  urllocal = {https://xavierleroy.org/publi/velus-pldi17.pdf},
  hal = {hal-01512286},
  urlhal = {https://hal.science/hal-01512286/},
  urlpublisher = {https://doi.org/10.1145/3062341.3062358},
  doi = {10.1145/3062341.3062358},
  pages = {586-601},
  xtopic = {compverif},
  xranking = {top},
  abstract = {The correct compilation of block diagram languages like
Lustre, Scade, and a discrete subset of Simulink is important since
they are used to program critical embedded control software. We
describe the specification and verification in an Interactive Theorem
Prover of a compilation chain that treats the key aspects of Lustre:
sampling, nodes, and delays.  Building on CompCert, we show that
repeated execution of the generated assembly code faithfully
implements the dataflow semantics of source programs.  We resolve two
key technical challenges. The first is the change from a synchronous
dataflow semantics, where programs manipulate streams of values, to an
imperative one, where computations manipulate memory sequentially. The
second is the verified compilation of an imperative language with
encapsulated state to C code where the state is realized by nested
records. We also treat a standard control optimization that eliminates
unnecessary conditional statements.}
}
@inproceedings{Kastner-LBSSF-2017,
  title = {Closing the gap -- The formally verified optimizing compiler {CompCert}},
  author = {K{\"a}stner, Daniel and Leroy, Xavier and Blazy, Sandrine and Schommer, Bernhard and Schmidt, Michael and Ferdinand, Christian},
  urllocal = {https://xavierleroy.org/publi/compcert-SSS2017.pdf},
  hal = {hal-01399482},
  urlhal = {https://hal.science/hal-01399482},
  urlpublisher = {http://scsc.org.uk/p135},
  booktitle = {SSS'17: Developments in System Safety Engineering: Proceedings of the Twenty-fifth Safety-critical Systems Symposium},
  year = {2017},
  publisher = {CreateSpace},
  pages = {163--180},
  xtopic = {compcert},
  xranking = {workshop},
  abstract = {CompCert is the first commercially available optimizing compiler that is formally verified, using machine-assisted mathematical proofs, to be free from miscompilation. The executable code it produces is proved to behave exactly as specified by the semantics of the source C program. CompCert's intended use is the compilation of safety-critical and mission-critical software meeting high levels of assurance. This article gives an overview of the design of CompCert and its proof concept, summarizes the resulting confidence argument, and gives an overview of relevant tool qualification strategies. We briefly summarize practical experience and give an overview of recent CompCert developments.}
}
@article{Leroy-Skylake-2017,
  title = {How {I} found a crash bug with hyperthreading in {Intel}'s {Skylake} processors},
  author = {Xavier Leroy},
  journal = {The Next Web},
  urlpublisher = {https://tnw.to/2tJ08uM},
  year = {2017},
  month = jul,
  popularscience = {yes}
}
@inproceedings{Leroy-BKSPF-2016,
  author = {Xavier Leroy and Sandrine Blazy and Daniel K\"astner
            and Bernhard Schommer and Markus Pister and Christian Ferdinand},
  title = {{CompCert} -- A formally verified optimizing compiler},
  booktitle = {ERTS 2016: Embedded Real Time Software and Systems},
  publisher = {SEE},
  year = 2016,
  xtopic = {compcert},
  urllocal = {https://xavierleroy.org/publi/erts2016_compcert.pdf},
  hal = {hal-01238879},
  urlhal = {https://hal.science/hal-01238879},
  abstract = {CompCert is the first commercially available optimizing compiler that is formally verified, using machine-assisted mathematical proofs, to be exempt from mis-compilation. The executable code it produces is proved to behave exactly as specified by the semantics of the source C program. This article gives an overview of the design of CompCert and its proof concept and then focuses on aspects relevant for industrial application. We briefly summarize practical experience and give an overview of recent CompCert development aiming at industrial usage. CompCert's intended use is the compilation of life-critical and mission-critical software meeting high levels of assurance. In this context tool qualification is of paramount importance. We summarize the confidence argument of CompCert and give an overview of relevant qualification strategies.}
}
@inproceedings{Jourdan-LBLP-2015,
  author = {Jacques-Henri Jourdan and Vincent Laporte and
            Sandrine Blazy and Xavier Leroy and David Pichardie},
  title = {A formally-verified {C} static analyzer},
  booktitle = {POPL 2015: 42nd symposium Principles of Programming Languages},
  publisher = {ACM},
  year = 2015,
  urllocal = {https://xavierleroy.org/publi/verasco-popl2015.pdf},
  urlpublisher = {https://doi.org/10.1145/2676726.2676966},
  doi = {10.1145/2676726.2676966},
  hal = {hal-01078386},
  pages = {247-259},
  xtopic = {analysis},
  xranking = {top},
  abstract = {This paper reports on the design and soundness proof, using the Coq
proof assistant, of Verasco, a static analyzer based on abstract
interpretation for most of the ISO~C~1999 language (excluding
recursion and dynamic allocation).  Verasco establishes the absence of
run-time errors in the analyzed programs.  It enjoys a modular
architecture that supports the extensible combination of multiple
abstract domains, both relational and non-relational.  Verasco
integrates with the CompCert formally-verified C~compiler so that not
only the soundness of the analysis results is guaranteed with
mathematical certitude, but also the fact that these guarantees carry
over to the compiled code.}
}
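The "modular architecture" mentioned in the abstract above rests on interfaces of abstract domains. A much-simplified sketch of what such an interface can look like in Coq (names and fields are ours; Verasco's actual signatures are considerably richer):

  (* An abstract domain over a type of concrete states: a carrier,
     a concretization relation gamma, and a join that must
     over-approximate both of its arguments. *)
  Record abstract_domain (concrete : Type) : Type := {
    carrier : Type;
    gamma : carrier -> concrete -> Prop;
    join : carrier -> carrier -> carrier;
    join_sound : forall a b c,
        gamma a c \/ gamma b c -> gamma (join a b) c
  }.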
@article{Boldo-Jourdan-Leroy-Melquiond-JAR,
  title = {Verified compilation of floating-point computations},
  author = {Boldo, Sylvie and Jourdan, Jacques-Henri and Leroy, Xavier and Melquiond, Guillaume},
  journal = {Journal of Automated Reasoning},
  year = 2015,
  volume = 54,
  number = 2,
  pages = {135--163},
  xtopic = {compcert},
  urllocal = {https://xavierleroy.org/publi/floating-point-compcert.pdf},
  urlpublisher = {https://doi.org/10.1007/s10817-014-9317-x},
  doi = {10.1007/s10817-014-9317-x},
  hal = {hal-00862689},
  abstract = {Floating-point arithmetic is known to be tricky: roundings, formats,
exceptional values. The IEEE-754 standard was a push towards
straightening the field and made formal reasoning about floating-point
computations easier and flourishing. Unfortunately, this is not
sufficient to guarantee the final result of a program, as several
other actors are involved: programming language, compiler, and
architecture.  The CompCert formally-verified compiler provides a
solution to this problem: this compiler comes with a mathematical
specification of the semantics of its source language (a large subset
of ISO C99) and target platforms (ARM, PowerPC, x86-SSE2), and with a
proof that compilation preserves semantics.  In this paper, we report
on our recent success in formally specifying and proving correct
CompCert's compilation of floating-point arithmetic.  Since CompCert
is verified using the Coq proof assistant, this effort required a
suitable Coq formalization of the IEEE-754 standard; we extended the
Flocq library for this purpose.  As a result, we obtain the first
formally verified compiler that provably preserves the semantics of
floating-point programs.}
}
@inproceedings{Krebbers-Leroy-Wiedijk-2014,
  author = {Robbert Krebbers and Xavier Leroy and Freek Wiedijk},
  title = {Formal {C} semantics: {CompCert} and the {C} standard},
  booktitle = {ITP 2014: Interactive Theorem Proving},
  series = {LNCS},
  number = 8558,
  pages = {543--548},
  publisher = {Springer},
  year = 2014,
  xtopic = {mechsem},
  urllocal = {https://xavierleroy.org/publi/Formal-C-CompCert.pdf},
  hal = {hal-00981212},
  urlhal = {https://hal.science/hal-00981212},
  urlpublisher = {https://doi.org/10.1007/978-3-319-08970-6_36},
  doi = {10.1007/978-3-319-08970-6_36},
  abstract = {
We discuss the difference between a formal semantics of the C standard, and a formal semantics of an implementation of C that satisfies the C standard. In this context we extend the CompCert semantics with end-of-array pointers and the possibility to byte-wise copy objects. This is a first and necessary step towards proving that the CompCert semantics refines the formal version of the C standard that is being developed in the Formalin project in Nijmegen.}
}
@inproceedings{Boldo-Jourdan-Leroy-Melquiond-2013,
  title = {A formally-verified {C} compiler supporting floating-point arithmetic},
  author = {Boldo, Sylvie and Jourdan, Jacques-Henri and Leroy, Xavier and Melquiond, Guillaume},
  booktitle = {ARITH 2013: 21st International Symposium on Computer Arithmetic},
  pages = {107-115},
  publisher = {IEEE Computer Society},
  urllocal = {https://xavierleroy.org/publi/fp-compcert-arith13.pdf},
  hal = {hal-00743090},
  urlhal = {https://hal.science/hal-00743090},
  urlpublisher = {https://doi.org/10.1109/ARITH.2013.30},
  doi = {10.1109/ARITH.2013.30},
  year = 2013,
  xtopic = {compcert},
  xranking = {top},
  abstract = {Floating-point arithmetic is known to be tricky: roundings, formats,
exceptional values. The IEEE-754 standard was a push towards
straightening the field and made formal reasoning about floating-point
computations easier and flourishing. Unfortunately, this is not
sufficient to guarantee the final result of a program, as several
other actors are involved: programming language, compiler,
architecture.  The CompCert formally-verified compiler provides a
solution to this problem: this compiler comes with a mathematical
specification of the semantics of its source language (a large subset
of ISO C90) and target platforms (ARM, PowerPC, x86-SSE2), and with a
proof that compilation preserves semantics.  In this paper, we report
on our recent success in formally specifying and proving correct
CompCert's compilation of floating-point arithmetic.  Since CompCert
is verified using the Coq proof assistant, this effort required a
suitable Coq formalization of the IEEE-754 standard; we extended the
Flocq library for this purpose.  As a result, we obtain the first
formally verified compiler that provably preserves the semantics of
floating-point programs.}
}
@inproceedings{Leroy-APLAS-2012,
  author = {Xavier Leroy},
  title = {Mechanized semantics for compiler verification},
  booktitle = {APLAS 2012: Programming Languages and Systems, 10th Asian Symposium},
  year = {2012},
  month = dec,
  address = {Kyoto, Japan},
  pages = {386--388},
  series = {LNCS},
  number = 7705,
  publisher = {Springer},
  urllocal = {https://xavierleroy.org/publi/mechanized-semantics-aplas-cpp-2012.pdf},
  urlpublisher = {https://doi.org/10.1007/978-3-642-35182-2_27},
  doi = {10.1007/978-3-642-35182-2_27},
  hal = {hal-01079337},
  xtopic = {mechsem},
  xinvited = {yes},
  abstract = {The formal verification of compilers and related programming tools
depends crucially on the availability of appropriate mechanized
semantics for the source, intermediate and target languages.  In this
invited talk, I review various forms of operational semantics and
their mechanization, based on my experience with the formal
verification of the CompCert~C compiler.}
}
@inproceedings{Robert-Leroy-2012,
  author = {Valentin Robert and Xavier Leroy},
  title = {A formally-verified alias analysis},
  booktitle = {CPP 2012: Certified Programs and Proofs},
  year = 2012,
  series = {LNCS},
  number = 7679,
  pages = {11-26},
  publisher = {Springer},
  urlpublisher = {https://doi.org/10.1007/978-3-642-35308-6_5},
  doi = {10.1007/978-3-642-35308-6_5},
  hal = {hal-00773109},
  urllocal = {https://xavierleroy.org/publi/alias-analysis.pdf},
  xtopic = {compverif},
  abstract = {This paper reports on the formalization and proof of soundness, using
the Coq proof assistant, of an alias analysis: a static analysis that
approximates the flow of pointer values.  The alias analysis
considered is of the points-to kind and is intraprocedural,
flow-sensitive, field-sensitive, and untyped.  Its soundness proof
follows the general style of abstract interpretation.
The analysis is designed to fit in the CompCert C verified compiler,
supporting future aggressive optimizations over memory accesses.}
}
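For the flavor of the points-to abstraction described above, here is an illustrative domain (ours, not the paper's definitions): each pointer is approximated by either "anywhere" or a finite set of abstract allocation sites.

  Require Import List.
  Import ListNotations.

  Inductive ptset : Type :=
    | PTop                        (* may point anywhere *)
    | PSites (sites : list nat).  (* may point only into these sites *)

  (* Ordering of the domain: PTop is the top element, and finite
     sets are ordered by inclusion. *)
  Definition ptset_le (x y : ptset) : Prop :=
    match x, y with
    | _, PTop => True
    | PTop, PSites _ => False
    | PSites s1, PSites s2 => incl s1 s2
    end.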
@incollection{Leroy-Appel-Blazy-Stewart-memory,
  author = {Leroy, Xavier and Appel, Andrew W. and Blazy, Sandrine and Stewart, Gordon},
  title = {The {CompCert} memory model},
  year = {2014},
  month = mar,
  booktitle = {Program Logics for Certified Compilers},
  editor = {Appel, Andrew W.},
  publisher = {Cambridge University Press},
  pages = {237--271},
  urllocal = {https://www.cs.princeton.edu/~appel/papers/plcc.pdf},
  xtopic = {mechsem}
}
@inproceedings{Jourdan-Pottier-Leroy,
  author = {Jacques-Henri Jourdan and François Pottier and Xavier Leroy},
  title = {Validating {LR(1)} parsers},
  booktitle = {ESOP 2012: Programming Languages and Systems, 21st European Symposium on Programming},
  urllocal = {https://xavierleroy.org/publi/validated-parser.pdf},
  urlpublisher = {https://doi.org/10.1007/978-3-642-28869-2_20},
  doi = {10.1007/978-3-642-28869-2_20},
  hal = {hal-01077321},
  year = 2012,
  pages = {397--416},
  series = {LNCS},
  number = 7211,
  publisher = {Springer},
  xtopic = {compverif},
  xranking = {leading},
  abstract = {
An LR(1) parser is a finite-state automaton, equipped with a stack,
which uses a combination of its current state and one lookahead symbol
in order to determine which action to perform next.  We present a
validator which, when applied to a context-free grammar G and an
automaton A, checks that A and G agree.  This validation of the parser
provides the correctness guarantees required by verified compilers and
other high-assurance software that involves parsing.  The validation
process is independent of which technique was used to construct A. The
validator is implemented and proved correct using the Coq proof
assistant. As an application, we build a formally-verified parser for
the C99 language.}
}
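The correctness guarantee provided by the validator above has a simple overall shape. A hedged sketch in Coq, with every name hypothetical (the actual development is phrased in terms of the grammars and automata handled by the parser generator):

  Parameter grammar automaton word ast : Type.
  Parameter validate : grammar -> automaton -> bool.
  Parameter parse : automaton -> word -> option ast.
  Parameter derives : grammar -> word -> ast -> Prop.

  (* If the validator accepts A against G, then every successful
     parse is a correct derivation: trust moves from the (unverified)
     parser generator to the (verified) validator. *)
  Axiom validate_sound :
    forall G A w t,
    validate G A = true -> parse A w = Some t -> derives G w t.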
@inproceedings{Bedin-Franca-ERTS-2012,
  author = {Ricardo {Bedin França} and Sandrine Blazy and Denis Favre-Felix
            and Xavier Leroy and Marc Pantel and Jean Souyris},
  title = {Formally verified optimizing compilation in {ACG}-based
           flight control software},
  booktitle = {ERTS 2012: Embedded Real Time Software and Systems},
  year = 2012,
  hal = {hal-00653367},
  urlhal = {https://hal.science/hal-00653367/},
  xtopic = {compcert},
  abstract = {This work presents an evaluation of the CompCert formally specified
and verified optimizing compiler for the development of DO-178 level A
flight control software. First, some fundamental characteristics of
flight control software are presented and the case study program is
described. Then, the use of CompCert is justified: its main point is
to allow optimized code generation by relying on the formal proof of
correctness and additional compilation information instead of the
current un-optimized generation required to produce predictable
assembly code patterns. The evaluation of its performance (measured
using WCET and code size) is presented and the results are compared to
those obtained with the currently used compiler.}
}
@inproceedings{Ramananandro-DosReis-Leroy-construction,
  author = {Tahina Ramananandro and Gabriel {Dos Reis} and Xavier Leroy},
  title = {A mechanized semantics for {C++} object construction and destruction,
           with applications to resource management},
  booktitle = {POPL 2012: 39th symposium Principles of Programming Languages},
  urllocal = {https://xavierleroy.org/publi/cpp-construction.pdf},
  urlpublisher = {https://doi.org/10.1145/2103656.2103718},
  doi = {10.1145/2103656.2103718},
  hal = {hal-00674663},
  pages = {521--532},
  year = 2012,
  xtopic = {mechsem},
  xranking = {top},
  publisher = {ACM},
  abstract = {
  We present a formal operational semantics and its Coq mechanization
  for the C++ object model, featuring object construction and
  destruction, shared and repeated multiple inheritance, and virtual
  function call dispatch.  These are key C++ language features for
  high-level system programming, in particular for predictable and
  reliable resource management.  This paper is the first to present a
  formal mechanized account of the metatheory of construction and
  destruction in C++, and applications to popular programming
  techniques such as ``resource acquisition is initialization.''  We
  also report on irregularities and apparent contradictions in the ISO
  C++03 and C++11 standards.}
}
@inproceedings{Bedin-Franca-PPES-2011,
  author = {Ricardo {Bedin França} and Denis Favre-Felix and Xavier Leroy
            and Marc Pantel and Jean Souyris},
  title = {Towards formally verified optimizing compilation in flight control software},
  booktitle = {PPES 2011: Predictability and Performance in Embedded Systems},
  pages = {59--68},
  series = {OASIcs},
  number = 18,
  publisher = {Dagstuhl Publishing},
  year = {2011},
  hal = {inria-00551370},
  urllocal = {https://hal.science/inria-00551370/},
  urlpublisher = {https://doi.org/10.4230/OASIcs.PPES.2011.59},
  doi = {10.4230/OASIcs.PPES.2011.59},
  xtopic = {compcert},
  xranking = {workshop},
  abstract = {
This work presents a preliminary evaluation of the use of the CompCert formally specified and verified  optimizing compiler for the development of level A critical flight control software. First, the motivation for choosing CompCert is presented, as well as the requirements and constraints for safety-critical avionics software. The main point is to allow optimized code generation by relying on the formal proof of correctness instead of the current un-optimized generation required to produce assembly code structurally similar to the algorithmic language (and even the initial models) source code. The evaluation of its performance (measured using WCET) is presented and the results are compared to those obtained with the currently used compiler. Finally, the paper discusses verification and certification issues that are raised when one seeks to use CompCert for the development of such critical software. }
}
@inproceedings{Leroy-POPL11,
  author = {Xavier Leroy},
  title = {Verified squared: does critical software deserve verified tools?},
  booktitle = {POPL 2011: 38th symposium Principles of Programming Languages},
  pages = {1--2},
  year = 2011,
  month = jan,
  address = {Austin, TX, USA},
  publisher = {ACM},
  urllocal = {https://xavierleroy.org/publi/popl11-invited-talk.pdf},
  urlpublisher = {https://doi.org/10.1145/1926385.1926387},
  doi = {10.1145/1926385.1926387},
  hal = {hal-01076682},
  xtopic = {compverif},
  xinvited = {yes},
  abstract = {
The formal verification of programs has progressed tremendously in
the last decade.  Principled but once academic approaches such as
Hoare logic and abstract interpretation finally gave birth to quality
verification tools, operating over source code (and not just idealized
models thereof) and able to verify complex real-world applications.
In this talk, I review some of the obstacles that
remain to be lifted before source-level verification tools can be
taken really seriously in the critical software industry: not just as
sophisticated bug-finders, but as elements of absolute confidence in
the correctness of a critical application.
}
}
@inproceedings{Ramananandro-DosReis-Leroy-layout,
  author = {Tahina Ramananandro and Gabriel {Dos Reis} and Xavier Leroy},
  title = {Formal verification of object layout for {C++} multiple inheritance},
  booktitle = {POPL 2011: 38th symposium Principles of Programming Languages},
  pages = {67--79},
  year = 2011,
  publisher = {ACM},
  urllocal = {https://xavierleroy.org/publi/cpp-object-layout.pdf},
  urlpublisher = {https://doi.org/10.1145/1926385.1926395},
  doi = {10.1145/1926385.1926395},
  hal = {hal-00674174},
  xtopic = {compverif},
  xranking = {top},
  abstract = {Object layout --- the concrete in-memory representation of objects ---
raises many delicate issues in the case of the C++ language, owing
in particular to multiple inheritance, C compatibility and separate
compilation.  This paper formalizes a family
of C++ object layout schemes and mechanically proves their correctness
against the operational semantics for multiple inheritance of Wasserrab
{\em et al}.  This formalization is flexible enough to account for
space-saving techniques such as empty base class optimization and
tail-padding optimization.  As an application, we obtain the first
formal correctness proofs for realistic, optimized object layout
algorithms, including one based on the popular GNU C++ application
binary interface.  This work provides semantic foundations to discover
and justify new layout optimizations; it is also a first step towards
the verification of a C++ compiler front-end.}
}
@article{Leroy-La-Recherche-10,
  author = {Xavier Leroy},
  title = {Comment faire confiance à un compilateur?},
  journal = {La Recherche},
  xx-note = {Les cahiers de l'INRIA},
  year = {2010},
  month = apr,
  volume = 440,
  urllocal = {https://xavierleroy.org/publi/cahiers-inria-2010.pdf},
  urlpublisher = {https://hal.science/inria-00511377/},
  hal = {inria-00511377},
  xtopic = {compverif},
  popularscience = {yes},
  abstract = {(In French.)  A short introduction to compiler verification, published in the French popular science magazine La Recherche.}
}
@incollection{Leroy-Marktoberdorf-09,
  author = {Xavier Leroy},
  title = {Mechanized semantics},
  booktitle = {Logics and languages for reliability and security},
  editor = {J. Esparza and B. Spanfelner and O. Grumberg},
  series = {NATO Science for Peace and Security Series D: Information and Communication Security},
  number = 25,
  pages = {195--224},
  publisher = {IOS Press},
  year = {2010},
  urllocal = {http://gallium.inria.fr/~xleroy/courses/Marktoberdorf-2009/notes.pdf},
  urlpublisher = {https://doi.org/10.3233/978-1-60750-100-8-195},
  doi = {10.3233/978-1-60750-100-8-195},
  xtopic = {mechsem},
  abstract = {The goal of this lecture is to show how modern theorem provers---in
this case, the Coq proof assistant---can be used to mechanize the specification
of programming languages and their semantics, and to reason over
individual programs and over generic program transformations,
as typically found in compilers.  The topics covered include:
operational semantics (small-step, big-step, definitional
interpreters); a simple form of denotational semantics; axiomatic
semantics and Hoare logic; generation of verification conditions, with
application to program proof; compilation to virtual machine code and
its proof of correctness; an example of an optimizing program
transformation (dead code elimination) and its proof of correctness.}
}
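As a taste of one topic from these notes, "compilation to virtual machine code and its proof of correctness", here is a self-contained miniature in Coq (our example, in the spirit of the notes): arithmetic expressions compiled to a stack machine.

  Require Import Arith List.
  Import ListNotations.

  Inductive expr : Type :=
    | Const (n : nat)
    | Plus (e1 e2 : expr).

  Fixpoint eval (e : expr) : nat :=
    match e with
    | Const n => n
    | Plus e1 e2 => eval e1 + eval e2
    end.

  Inductive instr : Type := Ipush (n : nat) | Iadd.

  Fixpoint compile (e : expr) : list instr :=
    match e with
    | Const n => [Ipush n]
    | Plus e1 e2 => compile e1 ++ compile e2 ++ [Iadd]
    end.

  (* Execute code against a stack of intermediate results. *)
  Fixpoint exec (c : list instr) (s : list nat) : list nat :=
    match c with
    | [] => s
    | Ipush n :: c' => exec c' (n :: s)
    | Iadd :: c' =>
        match s with
        | n2 :: n1 :: s' => exec c' (n1 + n2 :: s')
        | _ => s   (* stack underflow: cannot arise on compiled code *)
        end
    end.

  (* Correctness, provable by induction on e (suitably generalized
     over the code and stack): exec (compile e) [] = [eval e]. *)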
@inproceedings{Rideau-Leroy-regalloc,
  author = {Silvain Rideau and Xavier Leroy},
  title = {Validating register allocation and spilling},
  booktitle = {CC 2010: Compiler Construction},
  year = 2010,
  publisher = {Springer},
  series = {LNCS},
  number = 6011,
  pages = {224-243},
  xtopic = {compcert},
  urllocal = {https://xavierleroy.org/publi/validation-regalloc.pdf},
  urlpublisher = {https://doi.org/10.1007/978-3-642-11970-5_13},
  doi = {10.1007/978-3-642-11970-5_13},
  abstract = {Following the translation validation approach to high-assurance
compilation, we describe a new algorithm for validating {\em a
posteriori} the results of a run of register allocation.  The
algorithm is based on backward dataflow inference of
equations between variables, registers and stack locations, and can
cope with sophisticated forms of spilling and live range splitting, as
well as many forms of architectural irregularities such as overlapping
registers.  The soundness of the algorithm was mechanically proved
using the Coq proof assistant.}
}
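This entry, like the scheduling and code-motion papers below, instantiates a common pattern: verified translation validation. A generic sketch of the pattern in Coq, with all names hypothetical:

  Parameter program : Type.
  Parameter same_semantics : program -> program -> Prop.

  (* transform is untrusted (it can be arbitrary, unproved code);
     only validate needs a soundness proof. *)
  Parameter transform : program -> program.
  Parameter validate : program -> program -> bool.
  Axiom validate_sound :
    forall p p', validate p p' = true -> same_semantics p p'.

  (* The checked pass returns the transformed program only when
     validation succeeds, so it preserves semantics by construction. *)
  Definition checked_transform (p : program) : option program :=
    let p' := transform p in
    if validate p p' then Some p' else None.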
@article{Appel-Dockins-Leroy-listmachine,
  author = {Andrew W. Appel and Robert Dockins and Xavier Leroy},
  title = {A list-machine benchmark for mechanized metatheory},
  journal = {Journal of Automated Reasoning},
  year = 2012,
  volume = 49,
  number = 3,
  pages = {453--491},
  urllocal = {https://xavierleroy.org/publi/listmachine-journal.pdf},
  urlpublisher = {https://doi.org/10.1007/s10817-011-9226-1},
  doi = {10.1007/s10817-011-9226-1},
  xtopic = {mechsem},
  abstract = {We propose a benchmark to compare theorem-proving systems
on their ability to express proofs of compiler correctness.
In contrast to the first POPLmark, we emphasize the connection
of proofs to compiler implementations, and we point out
that much can be done without binders or alpha-conversion.
We propose specific criteria for evaluating the utility
of mechanized metatheory systems; we have constructed
solutions in both Coq and Twelf metatheory, and
we draw conclusions about those two systems in particular.}
}
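For readers unfamiliar with the benchmark above: the list machine is deliberately tiny. A rough sketch of its flavor (constructor names are ours; the benchmark's concrete definitions differ):

  (* Values are empty lists and cons cells; programs move them
     between numbered variables and branch on nil. *)
  Inductive value : Type :=
    | Vnil
    | Vcons (hd tl : value).

  Definition var := nat.
  Definition label := nat.

  Inductive instr : Type :=
    | Ijump (l : label)
    | Ibranch_if_nil (v : var) (l : label)
    | Ifetch_field (v : var) (field : nat) (dst : var)
    | Icons (hd tl : var) (dst : var)
    | Ihalt.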
@inproceedings{Tristan-Leroy-softpipe,
  author = {Jean-Baptiste Tristan and Xavier Leroy},
  title = {A simple, verified validator for software pipelining},
  booktitle = {POPL 2010: 37th symposium Principles of Programming Languages},
  pages = {83--92},
  publisher = {ACM},
  year = 2010,
  urllocal = {https://xavierleroy.org/publi/validation-softpipe.pdf},
  urlpublisher = {https://doi.org/10.1145/1706299.1706311},
  doi = {10.1145/1706299.1706311},
  xtopic = {compverif},
  xranking = {top},
  abstract = { Software pipelining is a loop optimization that overlaps the
  execution of several iterations of a loop to expose more
  instruction-level parallelism.  It can result in first-class
  performance characteristics, but at the cost of significant
  obfuscation of the code, making this optimization difficult to test
  and debug. In this paper, we present a translation validation
  algorithm that uses symbolic evaluation to detect semantics
  discrepancies between a loop and its pipelined version. Our
  algorithm can be implemented simply and efficiently, is provably
  sound, and appears to be complete with respect to most modulo
  scheduling algorithms.  A conclusion of this case study is that it is
  possible and effective to use symbolic evaluation to reason about
  loop transformations.}
}
@article{Leroy-Compcert-CACM,
  author = {Xavier Leroy},
  title = {Formal verification of a realistic compiler},
  journal = {Communications of the ACM},
  year = 2009,
  volume = 52,
  number = 7,
  pages = {107--115},
  xtopic = {compcert},
  urllocal = {https://xavierleroy.org/publi/compcert-CACM.pdf},
  urlpublisher = {https://cacm.acm.org/magazines/2009/7/32099-formal-verification-of-a-realistic-compiler/fulltext},
  doi = {10.1145/1538788.1538814},
  urlauthorizer = {http://dl.acm.org/authorize?187014},
  abstract = {This paper reports on the development and formal verification (proof
of semantic preservation) of CompCert, a compiler from Clight (a
large subset of the C programming language) to PowerPC assembly code,
using the Coq proof assistant both for programming the compiler and
for proving its correctness.  Such a verified compiler is useful in
the context of critical software and its formal verification: the
verification of the compiler guarantees that the safety properties
proved on the source code hold for the executable compiled code as
well.}
}
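The guarantee summarized in the abstract above has, very roughly, the following shape (a paraphrase in Coq with hypothetical names; the actual theorem is stated in terms of observable behaviors and refinement of undefined behaviors):

  Parameter c_program asm_program behavior : Type.
  Parameter compile : c_program -> option asm_program.
  Parameter c_behaves : c_program -> behavior -> Prop.
  Parameter asm_behaves : asm_program -> behavior -> Prop.

  (* Backward simulation, roughly: every behavior of the compiled
     code is a behavior allowed by the source program's semantics. *)
  Axiom semantic_preservation :
    forall p tp b,
    compile p = Some tp -> asm_behaves tp b -> c_behaves p b.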
@article{Dargaye-Leroy-uncurrying,
  author = {Zaynah Dargaye and Xavier Leroy},
  title = {A verified framework for higher-order uncurrying optimizations},
  journal = {Higher-Order and Symbolic Computation},
  urllocal = {https://xavierleroy.org/publi/higher-order-uncurrying.pdf},
  urlpublisher = {https://doi.org/10.1007/s10990-010-9050-z},
  doi = {10.1007/s10990-010-9050-z},
  year = 2009,
  volume = 22,
  number = 3,
  pages = {199--231},
  xtopic = {compverif},
  abstract = {Function uncurrying is an important optimization for the efficient
execution of functional programming languages.  This optimization
replaces curried functions by uncurried, multiple-argument functions,
while preserving the ability to evaluate partial applications.
First-order uncurrying (where curried functions are optimized only in
the static scopes of their definitions) is well understood and
implemented by many compilers, but its extension to higher-order
functions (where uncurrying can also be performed on
parameters and results of higher-order functions) is challenging.
This article develops a generic framework that expresses higher-order
uncurrying optimizations as type-directed insertion of coercions, and
prove its correctness.  The proof uses step-indexed logical relations
and was entirely mechanized using the Coq proof assistant.}
}
@article{Hirschowitz-Leroy-Wells-hosc,
  author = {Tom Hirschowitz and Xavier Leroy and J. B. Wells},
  title = {Compilation of extended recursion
                         in call-by-value functional languages},
  journal = {Higher-Order and Symbolic Computation},
  volume = 22,
  number = 1,
  pages = {3--66},
  year = 2009,
  urllocal = {https://xavierleroy.org/publi/letrec-cbv.pdf},
  urlpublisher = {https://doi.org/10.1007/s10990-009-9042-z},
  doi = {10.1007/s10990-009-9042-z},
  xtopic = {caml},
  abstract = {This paper formalizes and proves correct a compilation scheme for
 mutually-recursive definitions in call-by-value functional
 languages.  This scheme supports a wider range of recursive
 definitions than previous methods.  We formalize our technique as a
 translation scheme to a lambda-calculus featuring in-place update of
 memory blocks, and prove the translation to be correct.}
}
@inproceedings{Tristan-Leroy-LCM,
  author = {Jean-Baptiste Tristan and Xavier Leroy},
  title = {Verified validation of {Lazy} {Code} {Motion}},
  booktitle = {PLDI 2009: Programming Language Design and Implementation},
  year = 2009,
  publisher = {ACM},
  pages = {316--326},
  urllocal = {https://xavierleroy.org/publi/validation-LCM.pdf},
  urlpublisher = {https://doi.org/10.1145/1542476.1542512},
  doi = {10.1145/1542476.1542512},
  xtopic = {compverif},
  xranking = {top},
  abstract = { Translation validation establishes a posteriori the
  correctness of a run of a compilation pass or other program
  transformation.  In this paper, we develop an efficient translation
  validation algorithm for the Lazy Code Motion (LCM) optimization.
  LCM is an interesting challenge for validation because it is a
  global optimization that moves code across loops.  Consequently,
  care must be taken not to move computations that may fail before
  loops that may not terminate.  Our validator includes a specific
  check for anticipability to rule out such incorrect moves.
  We present a mechanically-checked proof of correctness of the
  validation algorithm, using the Coq proof assistant.
  Combining our validator with an unverified implementation of LCM, we
  obtain an LCM pass that is provably semantics-preserving and
  was integrated in the CompCert formally verified compiler.}
}
@article{Blazy-Leroy-Clight-09,
  author = {Sandrine Blazy and Xavier Leroy},
  title = {Mechanized semantics for the {Clight}
                         subset of the {C} language},
  year = 2009,
  urllocal = {https://xavierleroy.org/publi/Clight.pdf},
  urlpublisher = {https://doi.org/10.1007/s10817-009-9148-3},
  doi = {10.1007/s10817-009-9148-3},
  journal = {Journal of Automated Reasoning},
  volume = 43,
  number = 3,
  pages = {263-288},
  xtopic = {mechsem},
  abstract = {This article presents the formal semantics of a large subset of the C
language called Clight. Clight includes pointer arithmetic,
struct and union types, C loops and structured switch
statements. Clight is the source language of the CompCert
verified compiler. The formal semantics of Clight is a big-step
semantics equipped with traces of input/output events that observes
both terminating and diverging executions.  The formal semantics of
Clight is mechanized using the Coq proof assistant. In addition
to the semantics of Clight, this article describes its integration
in the CompCert verified compiler and several ways by which the
semantics was validated.}
}
@article{Leroy-backend,
  author = {Xavier Leroy},
  title = {A formally verified compiler back-end},
  journal = {Journal of Automated Reasoning},
  volume = 43,
  number = 4,
  pages = {363--446},
  year = 2009,
  urllocal = {https://xavierleroy.org/publi/compcert-backend.pdf},
  urlpublisher = {https://doi.org/10.1007/s10817-009-9155-4},
  doi = {10.1007/s10817-009-9155-4},
  xtopic = {compcert},
  abstract = {
This article describes the development and formal verification (proof
of semantic preservation) of a compiler back-end from Cminor (a simple
imperative intermediate language) to PowerPC assembly code, using the
Coq proof assistant both for programming the compiler and for proving
its correctness.  Such a verified compiler is useful in the context of
formal methods applied to the certification of critical software: the
verification of the compiler guarantees that the safety properties
proved on the source code hold for the executable compiled code as
well.  (Much extended and revised version of \cite{Leroy-compcert-06}.)}
}
@article{Leroy-Blazy-memory-model,
  author = {Xavier Leroy and Sandrine Blazy},
  title = {Formal verification of a {C}-like memory model
                         and its uses for verifying program transformations},
  journal = {Journal of Automated Reasoning},
  year = {2008},
  volume = 41,
  number = 1,
  pages = {1--31},
  xtopic = {mechsem},
  urllocal = {https://xavierleroy.org/publi/memory-model-journal.pdf},
  urlpublisher = {https://doi.org/10.1007/s10817-008-9099-0},
  doi = {10.1007/s10817-008-9099-0},
  abstract = {
This article presents the formal verification, using the Coq proof
assistant, of a memory model for low-level imperative languages such
as C and compiler intermediate languages.  Beyond giving semantics
to pointer-based programs, this model supports reasoning over
transformations of such programs.  We show how the properties of the
memory model are used to prove semantic preservation for three passes
of the Compcert verified compiler.}
}
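A hedged sketch of the memory interface the abstract above describes, loosely following CompCert's Memory module (the real interface is richer, and the law below elides chunk normalization):

  Require Import ZArith.

  Parameter mem block val memory_chunk : Type.
  Parameter alloc : mem -> Z -> Z -> mem * block.
  Parameter load  : memory_chunk -> mem -> block -> Z -> option val.
  Parameter store : memory_chunk -> mem -> block -> Z -> val -> option mem.

  (* The key "good variables" law, simplified: loading right after a
     store to the same address yields the stored value. *)
  Axiom load_store_same :
    forall chunk m1 b ofs v m2,
    store chunk m1 b ofs v = Some m2 ->
    load chunk m2 b ofs = Some v.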
@article{Rideau-Serpette-Leroy-parmov,
  author = {Laurence Rideau and Bernard P. Serpette and
                         Xavier Leroy},
  title = {Tilting at windmills with {Coq}:
                         formal verification of a compilation algorithm
                         for parallel moves},
  journal = {Journal of Automated Reasoning},
  year = {2008},
  volume = 40,
  number = 4,
  pages = {307--326},
  xtopic = {compcert},
  urllocal = {https://xavierleroy.org/publi/parallel-move.pdf},
  urlpublisher = {https://doi.org/10.1007/s10817-007-9096-8},
  doi = {10.1007/s10817-007-9096-8},
  abstract = {
This article describes the formal verification of a compilation algorithm
that transforms parallel moves (parallel assignments between variables)
into a semantically-equivalent sequence of elementary moves.
Two different specifications of the algorithm are given: an inductive
specification and a functional one, each with its correctness proofs.
A functional program can then be extracted and integrated in the
Compcert verified compiler.}
}
@article{Leroy-Grall-coindsem,
  author = {Xavier Leroy and Hervé Grall},
  title = {Coinductive big-step operational semantics},
  journal = {Information and Computation},
  volume = 207,
  number = 2,
  pages = {284--304},
  year = 2009,
  urllocal = {https://xavierleroy.org/publi/coindsem-journal.pdf},
  urlpublisher = {https://doi.org/10.1016/j.ic.2007.12.004},
  doi = {10.1016/j.ic.2007.12.004},
  abstract = {Using a call-by-value functional language as an example, this article
illustrates the use of coinductive definitions and proofs in big-step
operational semantics, enabling it to describe diverging evaluations
in addition to terminating evaluations.  We formalize the connections
between the coinductive big-step semantics and the standard small-step
semantics, proving that both semantics are equivalent.  We then study
the use of coinductive big-step semantics in proofs of type soundness
and proofs of semantic preservation for compilers.
A methodological originality of this paper is that all results
have been proved using the Coq proof assistant.  We explain the
proof-theoretic presentation of coinductive definitions and proofs
offered by Coq, and show that it facilitates the discovery and the
presentation of the results. (See
\verb|http://gallium.inria.fr/~xleroy/coindsem/|
for the Coq on-machine formalization of these results.)},
  xtopic = {mechsem}
}
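The central idea above fits in a few definitions. A minimal sketch for a pure call-by-value lambda-calculus (constructor names are ours and substitution is left abstract): an inductive predicate for terminating evaluations, and a coinductive one for diverging evaluations.

  Inductive term : Type :=
    | Var (n : nat)
    | Abs (body : term)
    | App (t1 t2 : term).

  (* Capture-avoiding substitution of the argument into a function
     body, left abstract in this sketch. *)
  Parameter subst : term -> term -> term.

  Inductive eval : term -> term -> Prop :=
    | eval_abs : forall b, eval (Abs b) (Abs b)
    | eval_app : forall t1 t2 b v2 v,
        eval t1 (Abs b) -> eval t2 v2 -> eval (subst b v2) v ->
        eval (App t1 t2) v.

  (* One coinductive rule per way an application can diverge:
     in the function, in the argument, or in the applied body. *)
  CoInductive evalinf : term -> Prop :=
    | evalinf_app_l : forall t1 t2,
        evalinf t1 -> evalinf (App t1 t2)
    | evalinf_app_r : forall t1 t2 v1,
        eval t1 v1 -> evalinf t2 -> evalinf (App t1 t2)
    | evalinf_app_body : forall t1 t2 b v2,
        eval t1 (Abs b) -> eval t2 v2 -> evalinf (subst b v2) ->
        evalinf (App t1 t2).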
@inproceedings{Tristan-Leroy-scheduling,
  author = {Jean-Baptiste Tristan and Xavier Leroy},
  title = {Formal verification of translation validators:
                 A case study on instruction scheduling optimizations},
  booktitle = {POPL 2008: 35th symposium Principles of Programming Languages},
  pages = {17--27},
  publisher = {ACM},
  year = 2008,
  urllocal = {https://xavierleroy.org/publi/validation-scheduling.pdf},
  urlpublisher = {https://doi.org/10.1145/1328897.1328444},
  doi = {10.1145/1328897.1328444},
  xtopic = {compverif},
  xranking = {top},
  abstract = {Translation validation consists of transforming a program and a
posteriori validating it in order to detect a modification of its
semantics. This approach can be used in a verified compiler, provided
that validation is formally proved to be correct. We present two such
validators and their Coq proofs of correctness.  The validators are
designed for two instruction scheduling optimizations: list
scheduling and trace scheduling.}
}
@inproceedings{Dargaye-Leroy-CPS,
  author = {Zaynah Dargaye and Xavier Leroy},
  title = {Mechanized verification of {CPS} transformations},
  booktitle = {LPAR 2007: Logic for Programming,
                     Artificial Intelligence and Reasoning},
  year = 2007,
  series = {LNAI},
  number = 4790,
  publisher = {Springer},
  pages = {211--225},
  urllocal = {https://xavierleroy.org/publi/cps-dargaye-leroy.pdf},
  urlpublisher = {https://doi.org/10.1007/978-3-540-75560-9_17},
  doi = {10.1007/978-3-540-75560-9_17},
  xtopic = {compverif},
  abstract = {
Transformation to continuation-passing style (CPS) is often performed
by optimizing compilers for functional programming languages.  As part
of the development and proof of correctness of a compiler for the
mini-ML functional language, we have mechanically verified the
correctness of two CPS transformations for a call-by-value
$\lambda$-calculus with $n$-ary functions, recursive functions, data
types and pattern-matching.  The transformations generalize Plotkin's
original call-by-value transformation and Danvy and Nielsen's
optimized transformation, respectively.  We used the Coq proof
assistant to formalize the transformations and conduct and check the
proofs.  Originalities of this work include the use of big-step
operational semantics to avoid difficulties with administrative
redexes, and of two-sorted de Bruijn indices to avoid difficulties
with $\alpha$-conversion.}
}
@techreport{Leroy-POPLmark,
  author = {Xavier Leroy},
  title = {A locally nameless solution to the {POPLmark} challenge},
  year = {2007},
  month = jan,
  institution = {INRIA},
  number = {6098},
  type = {Research report},
  urllocal = {https://xavierleroy.org/publi/POPLmark-locally-nameless.pdf},
  urlpublisher = {https://hal.science/inria-00123945},
  xtopic = {mechsem},
  abstract = {
The POPLmark challenge is a collective experiment intended to assess
the usability of theorem provers and proof assistants in the context
of fundamental research on programming languages.  In this report,
we present a solution to the challenge, developed with the Coq proof
assistant, and using the ``locally nameless'' presentation of terms with
binders introduced by McKinna, Pollack, Gordon, and McBride.}
}
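A small sketch of the "locally nameless" representation used above (ours, simplified to untyped lambda-terms): bound variables are de Bruijn indices, free variables are names, and going under a binder "opens" the body with a fresh name.

  Require Import Arith.

  Parameter atom : Type.   (* names of free variables *)

  Inductive trm : Type :=
    | bvar (i : nat)    (* bound variable: de Bruijn index *)
    | fvar (x : atom)   (* free variable: a name *)
    | abs (body : trm)
    | app (t1 t2 : trm).

  (* Opening: replace index k by the name x, so that reasoning under
     binders only ever manipulates named variables. *)
  Fixpoint open_rec (k : nat) (x : atom) (t : trm) : trm :=
    match t with
    | bvar i => if Nat.eqb k i then fvar x else bvar i
    | fvar y => fvar y
    | abs b => abs (open_rec (S k) x b)
    | app t1 t2 => app (open_rec k x t1) (open_rec k x t2)
    end.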
@inproceedings{EDOS-ASE06,
  title = {Managing the complexity of large free and
                         open source package-based software distributions},
  author = {Fabio Mancinelli and Roberto {Di Cosmo} and
                         Jérôme Vouillon and Jaap Boender and Berke Durak
                         and Xavier Leroy and Ralf Treinen},
  booktitle = {ASE 2006: 21st Int. Conf. on Automated Software Engineering},
  pages = {199--208},
  publisher = {IEEE Computer Society},
  year = 2006,
  urllocal = {https://xavierleroy.org/publi/edos-ase06.pdf},
  urlpublisher = {https://doi.org/10.1109/ASE.2006.49},
  doi = {10.1109/ASE.2006.49},
  abstract = {
The widespread adoption of Free and Open Source Software (FOSS) in
many strategic contexts of the information technology society has
drawn the attention on the issues regarding how to handle the
complexity of assembling and managing a huge number of (packaged)
components in a consistent and effective way. FOSS distributions (and
in particular GNU/Linux-based ones) have always provided tools for
managing the tasks of installing, removing and upgrading the
(packaged) components they were made of. While these tools provide a
(not always effective) way to handle these tasks on the client side,
there is still a lack of tools that could help the distribution
editors to maintain, on the server side, large and high-quality
distributions. In this paper we present our research whose main goal
is to fill this gap: we show our approach, the tools we have developed
and their application with experimental results. Our contribution
provides an effective and automatic way to support distribution
editors in handling those issues that were, until now, mostly
addressed using ad-hoc tools and manual techniques.},
  xtopic = {edos}
}
@inproceedings{Blazy-Dargaye-Leroy-06,
  author = {Sandrine Blazy and Zaynah Dargaye and Xavier Leroy},
  title = {Formal verification of a {C} compiler front-end},
  booktitle = {FM 2006: Int. Symp. on Formal Methods},
  series = {LNCS},
  publisher = {Springer},
  number = 4085,
  year = 2006,
  pages = {460--475},
  urllocal = {https://xavierleroy.org/publi/cfront.pdf},
  urlpublisher = {https://doi.org/10.1007/11813040_31},
  doi = {10.1007/11813040_31},
  abstract = {This paper presents the formal verification of a compiler front-end
that translates a subset of the C language into the Cminor
intermediate language.  The semantics of the source and target
languages as well as the translation between them have been written in
the specification language of the Coq proof assistant. The proof of
observational semantic equivalence between the source and generated
code has been machine-checked using Coq.  An executable compiler was
obtained by automatic extraction of executable Caml code from the Coq
specification of the translator, combined with a certified compiler
back-end generating PowerPC assembly code from Cminor, described in
previous work.},
  xtopic = {compcert}
}
@inproceedings{Appel-Leroy-listmachine-lfmtp,
  author = {Appel, Andrew W.  and  Leroy, Xavier},
  title = {A list-machine benchmark for mechanized metatheory
                       (extended abstract)},
  booktitle = {LFMTP 2006: Int. Workshop on Logical
                       Frameworks and Meta-Languages},
  year = {2007},
  series = {ENTCS},
  number = {174/5},
  pages = {95--108},
  urllocal = {https://xavierleroy.org/publi/listmachine-lfmtp.pdf},
  urlpublisher = {https://doi.org/10.1016/j.entcs.2007.01.020},
  doi = {10.1016/j.entcs.2007.01.020},
  abstract = {Short version of \cite{Appel-Leroy-listmachine-tr}.},
  xtopic = {mechsem},
  xranking = {workshop}
}
@techreport{Appel-Leroy-listmachine-tr,
  author = {Appel, Andrew W.  and  Leroy, Xavier},
  title = {A list-machine benchmark for mechanized metatheory},
  year = {2006},
  institution = {INRIA},
  number = {5914},
  type = {Research report},
  urllocal = {https://xavierleroy.org/publi/listmachine-tr.pdf},
  hal = {inria-00077531},
  urlhal = {https://hal.science/inria-00077531},
  abstract = {We propose a benchmark to compare theorem-proving systems
on their ability to express proofs of compiler correctness.
In contrast to the first POPLmark, we emphasize the connection
of proofs to compiler implementations, and we point out
that much can be done without binders or alpha-conversion.
We propose specific criteria for evaluating the utility
of mechanized metatheory systems; we have constructed
solutions in both Coq and Twelf metatheory, and
we draw conclusions about those two systems in particular.},
  xtopic = {mechsem}
}
@inproceedings{EDOS-FRCSS06,
  title = {Maintaining large software distributions:
                         new challenges from the {FOSS} era},
  author = {Roberto {Di Cosmo} and Berke Durak and Xavier Leroy
                         and Fabio Mancinelli and Jérôme Vouillon},
  booktitle = {FRCSS 2006: Future Research Challenges for Software and Services},
  series = {EASST Newsletter},
  number = {12},
  pages = {7--20},
  year = 2006,
  urllocal = {https://xavierleroy.org/publi/edos-frcss06.pdf},
  abstract = {In the mainstream adoption of free and open source software (FOSS),
distribution editors play a crucial role: they package, integrate and
distribute a wide variety of software, written in a variety of
languages, for a variety of purposes of unprecedented breadth.
Ensuring the quality of a FOSS distribution is a technical and
engineering challenge, owing to the size and complexity of these
distributions (tens of thousands of software packages). A number of
original topics for research arise from this challenge. This paper is
a gentle introduction to this new research area, and strives to
clearly and formally identify many of the desirable properties that
must be enjoyed by these distributions to ensure an acceptable quality
level.},
  xtopic = {edos}
}
@inproceedings{Leroy-coindsem,
  author = {Xavier Leroy},
  title = {Coinductive big-step operational semantics},
  booktitle = {ESOP 2006: European Symposium on Programming},
  year = 2006,
  publisher = {Springer},
  series = {LNCS},
  number = 3924,
  pages = {54-68},
  urllocal = {https://xavierleroy.org/publi/coindsem.pdf},
  urlpublisher = {https://doi.org/10.1007/11693024_5},
  doi = {10.1007/11693024_5},
  abstract = {This paper illustrates the use of co-inductive definitions and proofs
in big-step operational semantics, enabling the latter to describe
diverging evaluations in addition to terminating evaluations. We show
applications to proofs of type soundness and to proofs of semantic
preservation for compilers.  (See
\verb|http://gallium.inria.fr/~xleroy/publi/coindsem/|
for the Coq on-machine formalization of these results.)},
  xtopic = {mechsem},
  xranking = {leading}
}
@inproceedings{Leroy-compcert-06,
  author = {Xavier Leroy},
  title = {Formal certification of a compiler back-end, or:
                   programming a compiler with a proof assistant},
  booktitle = {POPL 2006: 33rd symposium Principles of Programming Languages},
  year = 2006,
  publisher = {ACM},
  urllocal = {https://xavierleroy.org/publi/compiler-certif.pdf},
  urlpublisher = {https://doi.org/10.1145/1111037.1111042},
  doi = {10.1145/1111037.1111042},
  pages = {42--54},
  abstract = {This paper reports on the development and formal certification (proof
of semantic preservation) of a compiler from Cminor (a C-like
imperative language) to PowerPC assembly code, using the Coq proof
assistant both for programming the compiler and for proving its
correctness.  Such a certified compiler is useful in the context of
formal methods applied to the certification of critical software: the
certification of the compiler guarantees that the safety properties
proved on the source code hold for the executable compiled code as well.},
  xtopic = {compcert},
  xranking = {top}
}
@inproceedings{Blazy-Leroy-05,
  author = {Sandrine Blazy and Xavier Leroy},
  title = {Formal verification of a memory model for
                         {C}-like imperative languages},
  booktitle = {ICFEM 2005: International Conference on
                         Formal Engineering Methods},
  year = 2005,
  number = 3785,
  series = {LNCS},
  publisher = {Springer},
  pages = {280--299},
  urllocal = {https://xavierleroy.org/publi/memory-model.pdf},
  urlpublisher = {https://doi.org/10.1007/11576280_20},
  doi = {10.1007/11576280_20},
  abstract = {
This paper presents a formal verification with the Coq proof assistant
of a memory model for C-like imperative languages. This model defines
the memory layout and the operations that manage the memory. The model
has been specified at two levels of abstraction and implemented as
part of an ongoing certification in Coq of a moderately-optimising C
compiler. Many properties of the memory have been verified in the
specification. They facilitate the definition of precise formal
semantics of C pointers. Certified OCaml code implementing the
memory model has been automatically extracted from the
specifications.},
  xtopic = {mechsem}
}
@inproceedings{EDOS-OSS-05,
  author = {
Serge Abiteboul and
Ciarán Bryce and
Roberto {Di Cosmo} and
Klaus R. Dittrich and
Stéfane Fermigier and
Stéphane Laurière and
Frédéric Lepied and
Xavier Leroy and
Tova Milo and
Eleonora Panto and
Radu Pop and
Assaf Sagi and
Yotam Shtossel and
Florent Villard and
Boris Vrdoljak},
  title = {{EDOS}: {Environment} for the {Development} and
                         {Distribution} of {Open} {Source} {Software}},
  booktitle = {OSS 2005: International Conference on {Open} {Source} Systems},
  year = 2005,
  urllocal = {https://xavierleroy.org/publi/edos-oss05.pdf},
  abstract = {
The open-source software community now comprises a very large
and growing number of contributors and users. The GNU/Linux operating
system, for instance, has an estimated 18 million users worldwide, and
its contributing developers number in the thousands. This critical
mass of contributors taking part in various open-source projects has
helped ensure the high quality of open-source software. However,
despite the achievements of the open-source software industry, there
are issues in the production of large-scale open-source software
(OSS) such as the GNU/Linux operating system that have to be
addressed as the numbers of users, contributors, and available
applications grow. EDOS is a European project supported by IST,
started in October 2004 and ending in 2007, whose objective is to
provide a new generation of methodologies, theoretical models,
technical tools and quality models specifically tailored to OSS
engineering and to software distribution over the Internet.},
  xtopic = {edos}
}
@inproceedings{Bertot-Gregoire-Leroy-05,
  author = {Yves Bertot and Benjamin Grégoire and Xavier Leroy},
  title = {A structured approach to proving compiler
                         optimizations based on dataflow analysis},
  booktitle = {TYPES 2004: Types for Proofs and Programs},
  year = 2006,
  series = {LNCS},
  number = 3839,
  pages = {66--81},
  publisher = {Springer},
  urllocal = {https://xavierleroy.org/publi/proofs_dataflow_optimizations.pdf},
  urlpublisher = {https://doi.org/10.1007/11617990_5},
  doi = {10.1007/11617990_5},
  abstract = {
This paper reports on the correctness proof of compiler optimizations
based on data-flow analysis.  We formulate the optimizations and
analyses as instances of a general framework for data-flow analyses and
transformations, and prove that the optimizations preserve the
behavior of the compiled programs.  This development is a part of a
larger effort of certifying an optimizing compiler by proving semantic
equivalence between source and compiled code.},
  xtopic = {compverif}
}
@article{Hirschowitz-Leroy-TOPLAS,
  author = {Tom Hirschowitz and Xavier Leroy},
  title = {Mixin modules in a call-by-value setting},
  journal = {ACM Transactions on Programming Languages and Systems},
  volume = 27,
  number = 5,
  pages = {857--881},
  year = 2005,
  urllocal = {https://xavierleroy.org/publi/mixins-cbv-toplas.pdf},
  urlpublisher = {https://doi.org/10.1145/1086642.1086644},
  doi = {10.1145/1086642.1086644},
  abstract = {The ML module system provides powerful parameterization facilities,
but lacks the ability to split mutually recursive definitions across
modules, and does not provide enough facilities for incremental
programming.  A promising approach to solve these issues is Ancona and
Zucca's mixin modules calculus CMS.  However, the straightforward
way to adapt it to ML fails, because it allows arbitrary recursive
definitions to appear at any time, which ML does not support.  In
this paper, we enrich CMS with a refined type system that controls
recursive definitions through the use of dependency graphs.  We then
develop a separate compilation scheme, directed by dependency graphs,
that translates mixin modules down to a CBV lambda-calculus extended
with a non-standard let rec construct.},
  xtopic = {modules}
}
@inproceedings{Hirschowitz-Leroy-Wells-04,
  author = {Tom Hirschowitz and Xavier Leroy and J. B. Wells},
  title = {Call-by-value mixin modules: reduction semantics, 
                  side effects, types},
  booktitle = {ESOP 2004: European Symposium on Programming},
  number = 2986,
  series = {LNCS},
  publisher = {Springer},
  pages = {64--78},
  year = {2004},
  urllocal = {https://xavierleroy.org/publi/mixins-mm-esop2004.pdf},
  urlpublisher = {https://doi.org/10.1007/978-3-540-24725-8_6},
  doi = {10.1007/978-3-540-24725-8_6},
  abstract = {Mixin modules are a framework for modular programming
that supports code parameterization, incremental programming via late
binding and redefinitions, and cross-module recursion.  In this paper,
we develop a language of mixin modules that supports call-by-value
evaluation, and formalize a reduction semantics and a sound type
system for this language.},
  xtopic = {modules},
  xranking = {leading}
}
@inproceedings{Hirschowitz-Leroy-Wells-rec,
  author = {Tom Hirschowitz and Xavier Leroy and J. B. Wells},
  title = {Compilation of extended recursion
                         in call-by-value functional languages},
  booktitle = {PPDP 2003: International Conference on Principles and Practice of Declarative Programming},
  year = 2003,
  pages = {160--171},
  publisher = {ACM},
  urllocal = {https://xavierleroy.org/publi/compil-recursion.pdf},
  urlpublisher = {https://doi.org/10.1145/888251.888267},
  doi = {10.1145/888251.888267},
  abstract = {This paper formalizes and proves correct a compilation scheme for
mutually-recursive definitions in call-by-value functional
languages. This scheme supports a wider range of recursive definitions
than standard call-by-value recursive definitions. We formalize our
technique as a translation scheme to a lambda-calculus featuring
in-place update of memory blocks, and prove the translation to be
faithful.},
  xtopic = {caml}
}
@inproceedings{Leroy-security-survey-03,
  author = {Xavier Leroy},
  title = {Computer security from a programming language 
                         and static analysis perspective},
  booktitle = {ESOP 2003: Programming Languages and Systems,
                         12th European Symposium on Programming},
  pages = {1--9},
  number = 2618,
  series = {LNCS},
  publisher = {Springer},
  year = 2003,
  month = apr,
  address = {Warsaw, Poland},
  urllocal = {https://xavierleroy.org/publi/language-security-etaps03.pdf},
  urlpublisher = {https://doi.org/10.1007/3-540-36575-3_1},
  doi = {10.1007/3-540-36575-3_1},
  abstract = {A short survey of language-based computer security. Extended abstract of an invited lecture.},
  xtopic = {security},
  xinvited = {yes}
}
@article{Leroy-bytecode-verification-03,
  author = {Xavier Leroy},
  title = {{Java} bytecode verification: 
                         algorithms and formalizations},
  journal = {Journal of Automated Reasoning},
  year = 2003,
  volume = 30,
  number = {3--4},
  pages = {235--269},
  urllocal = {https://xavierleroy.org/publi/bytecode-verification-JAR.pdf},
  urlpublisher = {https://doi.org/10.1023/A:1025055424017},
  doi = {10.1023/A:1025055424017},
  abstract = {Bytecode verification is a crucial security component for Java
applets, on the Web and on embedded devices such as smart cards.
This paper reviews the various bytecode verification algorithms that have
been proposed, recasts them in a common framework of dataflow analysis, and 
surveys the use of proof assistants to specify bytecode verification and
prove its correctness.  
(Extended and revised version of \cite{Leroy-survey-verif}.)},
  xtopic = {security}
}
@inproceedings{Aponte-Leroy-94,
  author = {María-Virginia Aponte and Xavier Leroy},
  title = {Llamado de procedimientos a distancia y
                         abstracción de tipos},
  booktitle = {Proc. 20th CLEI PANEL Latin-American
                         computer science conference},
  pages = {1281--1292},
  year = 1994,
  urllocal = {https://xavierleroy.org/publi/rpc-abstract-types.pdf},
  xtopic = {misc},
  xranking = {national},
  abstract = {In this article, we study the relationship between
remote procedure call (RPC) and statically-typed languages with type
abstraction. In particular, we show how to exploit type information
to reduce data transmission time over the network. To this end, we
develop a simple formalization that describes the automatic
generation of efficient communication interfaces. We conclude our
study with a correctness proof showing the equivalence between local
and distributed evaluation of any program.}
}
@techreport{Cardelli-Leroy-dot2,
  author = {Luca Cardelli and Xavier Leroy},
  title = {Abstract types and the dot notation},
  institution = {Digital Equipment Corporation, Systems Research Center},
  type = {Research report},
  year = {1990},
  number = {56},
  urlpublisher = {http://apotheca.hpl.hp.com/ftp/pub/DEC/SRC/research-reports/abstracts/src-rr-056.html},
  abstract = {Same technical content as the conference paper \cite{Cardelli-Leroy-dot},
plus a drawing by Luca Cardelli.},
  xtopic = {types}
}
@inproceedings{Cardelli-Leroy-dot,
  author = {Luca Cardelli and Xavier Leroy},
  title = {Abstract types and the dot notation},
  booktitle = {Proceedings IFIP TC2 working conference
                         on programming concepts and methods},
  pages = {479--504},
  publisher = {North-Holland},
  year = {1990},
  urllocal = {https://xavierleroy.org/publi/abstract-types-dot-notation.pdf},
  abstract = {We investigate the use of the dot notation in the context of
abstract types.  The dot notation -- that is, a.f referring to
the operation f provided by the abstraction a -- is used by
programming languages such as Modula-2 and CLU. We compare
this notation with the Mitchell-Plotkin approach, which draws
a parallel between type abstraction and (weak) existential
quantification in constructive logic. The basic operations on
existentials coming from logic give new insights about the
meaning of type abstraction, but differ completely from the
more familiar dot notation. In this paper, we formalize simple
calculi equipped with the dot notation, and relate them to a
more classical calculus a la Mitchell and Plotkin. This work
provides some theoretical foundations for the dot notation,
and suggests some useful extensions.},
  xtopic = {types}
}
@inproceedings{OCamlP3L-98,
  author = {Marco Danelutto and Roberto {Di Cosmo}
                         and Xavier Leroy and Susanna Pelagatti},
  title = {Parallel functional programming with skeletons:
                         the {OCamlP3L} experiment},
  booktitle = {Proceedings ACM workshop on ML and its applications},
  publisher = {Cornell University},
  year = {1998},
  urllocal = {https://xavierleroy.org/publi/ocamlp3l-mlws.pdf},
  abstract = {This paper reports on skeleton-based parallel programming in the context of 
the Caml functional language.  An experimental implementation, based on
TCP sockets and marshaling of function closures, is described and
assessed.},
  xtopic = {caml},
  xranking = {workshop}
}
@inproceedings{Doligez-Leroy-gc,
  author = {Damien Doligez and Xavier Leroy},
  title = {A concurrent, generational garbage collector
                         for a multithreaded implementation of {ML}},
  booktitle = {POPL 1993: 20th symposium Principles of Programming Languages},
  year = {1993},
  pages = {113--123},
  publisher = {ACM},
  urllocal = {https://xavierleroy.org/publi/concurrent-gc.pdf},
  urlpublisher = {https://doi.org/10.1145/158511.158611},
  doi = {10.1145/158511.158611},
  abstract = {This paper presents the design and implementation of a ``quasi
real-time'' garbage collector for Concurrent Caml Light, an
implementation of ML with threads. This two-generation system
combines a fast, asynchronous copying collector on the young
generation with a non-disruptive concurrent marking collector
on the old generation.  This design crucially relies on the ML
compile-time distinction between mutable and immutable
objects.},
  xtopic = {caml},
  xranking = {top}
}
@inproceedings{Gregoire-Leroy-02,
  author = {Benjamin Gr{\'e}goire and Xavier Leroy},
  title = {A compiled implementation of strong reduction},
  booktitle = {ICFP 2002: International Conference on Functional Programming},
  pages = {235--246},
  publisher = {ACM},
  year = 2002,
  urllocal = {https://xavierleroy.org/publi/strong-reduction.pdf},
  urlpublisher = {https://doi.org/10.1145/581478.581501},
  doi = {10.1145/581478.581501},
  abstract = {Motivated by applications to proof assistants based on dependent
types, we develop and prove correct a strong reducer and
$\beta$-equivalence checker for the $\lambda$-calculus with products,
sums, and guarded fixpoints.  Our approach is based on compilation to
the bytecode of an abstract machine performing weak reductions on
non-closed terms, derived with minimal modifications from the ZAM
machine used in the Objective Caml bytecode interpreter, and
complemented by a recursive ``read back'' procedure.  An
implementation in the Coq proof assistant demonstrates important
speed-ups compared with the original interpreter-based implementation
of strong reduction in Coq.},
  xtopic = {caml},
  xranking = {top}
}
@inproceedings{Leroy-appl-functors,
  author = {Xavier Leroy},
  title = {Applicative functors and fully transparent
                         higher-order modules},
  booktitle = {POPL 1995: 22nd symposium Principles of Programming Languages},
  year = 1995,
  publisher = {ACM},
  pages = {142--153},
  urllocal = {https://xavierleroy.org/publi/applicative-functors.pdf},
  urlpublisher = {https://doi.org/10.1145/199448.199476},
  doi = {10.1145/199448.199476},
  abstract = {We present a variant of the Standard ML module system where
parameterized abstract types (i.e. functors returning generative
types) map provably equal arguments to compatible abstract types,
instead of generating distinct types at each application as in
Standard ML. This extension solves the full transparency problem (how
to give syntactic signatures for higher-order functors that express
exactly their propagation of type equations), and also provides better
support for non-closed code fragments.},
  xtopic = {modules},
  xranking = {top}
}
@inproceedings{Leroy-csl-jfla,
  author = {Xavier Leroy},
  title = {Le syst\`eme {Caml} {Special} {Light}:
                         modules et compilation efficace en {Caml}},
  booktitle = {Actes des Journ\'ees Francophones des 
                         Langages Applicatifs},
  year = 1996,
  month = jan,
  pages = {111--131},
  publisher = {INRIA},
  xtopic = {caml},
  xranking = {national}
}
@techreport{Leroy-csl-rr,
  author = {Xavier Leroy},
  title = {Le syst\`eme {Caml} {Special} {Light}:
                         modules et compilation efficace en {Caml}},
  institution = {INRIA},
  type = {Research report},
  year = {1995},
  month = nov,
  number = {2721},
  urllocal = {https://xavierleroy.org/publi/caml-special-light-rr.pdf},
  hal = {inria-00073972},
  urlhal = {https://hal.science/inria-00073972},
  abstract = {This report presents an overview of the Caml Special
Light system, an experimental implementation of the Caml language
offering two major extensions: first, a module calculus (including
functors and multiple views of the same module) in the style of
Standard ML's, but building on recent advances in the theory of
module typing and preserving compatibility with separate compilation;
second, a dual compiler, producing both efficient native code, for
compute-intensive Caml applications, and interpreted abstract code,
for fast compilation and convenient debugging.},
  xtopic = {caml}
}
@article{Leroy-generativity,
  author = {Xavier Leroy},
  title = {A syntactic theory of type generativity
                         and sharing},
  year = 1996,
  journal = {Journal of Functional Programming},
  volume = 6,
  number = 5,
  pages = {667--698},
  urllocal = {https://xavierleroy.org/publi/syntactic-generativity.pdf},
  urlpublisher = {https://doi.org/10.1017/S0956796800001933},
  doi = {10.1017/S0956796800001933},
  abstract = {This paper presents a purely syntactic account of type generativity
and sharing -- two key mechanisms in the Standard ML module system
-- and shows its equivalence with the traditional
stamp-based description of these mechanisms. This syntactic
description recasts the Standard ML module system in a more
abstract, type-theoretic framework.},
  xtopic = {modules}
}
@inproceedings{Leroy-intro-tic98,
  author = {Xavier Leroy},
  title = {An overview of Types in Compilation},
  booktitle = {TIC 1998: workshop Types in Compilation},
  pages = {1--8},
  series = {LNCS},
  number = 1473,
  publisher = {Springer},
  year = {1998},
  month = mar,
  urllocal = {https://xavierleroy.org/publi/intro-tic98.pdf},
  urlpublisher = {https://doi.org/10.1007/BFb0055509},
  doi = {10.1007/BFb0055509},
  abstract = {A short survey of the uses of types in compilation.},
  xtopic = {typecomp}
}
@inproceedings{Leroy-latex2html,
  author = {Xavier Leroy},
  title = {Lessons learned from the translation of
                         documentation from {LaTeX} to {HTML}},
  booktitle = {ERCIM/W4G Workshop on WWW
                         Authoring and Integration Tools},
  year = 1995,
  month = feb,
  urllocal = {https://xavierleroy.org/publi/w4g.html},
  xtopic = {misc},
  xranking = {workshop},
  abstract = {This presentation reports on the transformation of a 200-page LaTeX
technical documentation into HTML for inclusion in the World Wide Web,
and on the tools that had to be developed for this purpose.}
}
@inproceedings{Leroy-manifest-types,
  author = {Xavier Leroy},
  title = {Manifest types, modules, and separate compilation},
  booktitle = {POPL 1994: 21st symposium Principles of Programming Languages},
  year = 1994,
  publisher = {ACM},
  pages = {109--122},
  urllocal = {https://xavierleroy.org/publi/manifest-types-popl.pdf},
  urlpublisher = {https://doi.org/10.1145/174675.176926},
  doi = {10.1145/174675.176926},
  abstract = {This paper presents a variant of the SML module system that
introduces a strict distinction between abstract types and
manifest types (types whose definitions are part of the module
specification), while retaining most of the expressive power
of the SML module system.  The resulting module system
provides much better support for separate compilation.},
  xtopic = {modules},
  xranking = {top}
}
@article{Leroy-modular-modules,
  author = {Xavier Leroy},
  title = {A modular module system},
  journal = {Journal of Functional Programming},
  volume = 10,
  number = 3,
  year = 2000,
  pages = {269--303},
  urllocal = {https://xavierleroy.org/publi/modular-modules-jfp.pdf},
  urlpublisher = {https://doi.org/10.1017/S0956796800003683},
  doi = {10.1017/S0956796800003683},
  abstract = {A simple implementation of an SML-like module system is presented as a
module parameterized by a base language and its type-checker. This
demonstrates constructively the applicability of that module system to
a wide range of programming languages. Full source code is available
in the Web appendix \url{http://gallium.inria.fr/~xleroy/publi/modular-modules-appendix/}.},
  xtopic = {modules}
}
@inproceedings{Leroy-survey-verif,
  author = {Xavier Leroy},
  title = {{Java} bytecode verification: an overview},
  booktitle = {CAV 2001: Computer Aided Verification},
  year = 2001,
  publisher = {Springer},
  series = {LNCS},
  number = 2102,
  pages = {265--285},
  urllocal = {https://xavierleroy.org/publi/survey-bytecode-verification.pdf},
  urlpublisher = {https://doi.org/10.1007/3-540-44585-4_26},
  doi = {10.1007/3-540-44585-4_26},
  abstract = {Preliminary version of \cite{Leroy-bytecode-verification-03}.},
  xtopic = {security},
  xranking = {top}
}
@inproceedings{Leroy-oncard-verifier-conf,
  author = {Xavier Leroy},
  title = {On-card bytecode verification for {Java} {Card}},
  booktitle = {E-SMART 2001: Smart card programming and security},
  publisher = {Springer},
  series = {LNCS},
  number = 2140,
  pages = {150--164},
  year = 2001,
  urllocal = {https://xavierleroy.org/publi/oncard-verifier-esmart.pdf},
  urlpublisher = {https://doi.org/10.1007/3-540-45418-7_13},
  doi = {10.1007/3-540-45418-7_13},
  abstract = {Preliminary version of \cite{Leroy-oncard-verifier-journal}.},
  xtopic = {security}
}
@article{Leroy-oncard-verifier-journal,
  author = {Xavier Leroy},
  title = {Bytecode verification on {Java} smart card},
  journal = {Software -- Practice \& Experience},
  year = 2002,
  volume = 32,
  number = 4,
  pages = {319--340},
  urllocal = {https://xavierleroy.org/publi/oncard-verifier-spe.pdf},
  urlpublisher = {https://doi.org/10.1002/spe.438},
  doi = {10.1002/spe.438},
  abstract = {This article presents a novel approach to the problem of bytecode
verification for Java Card applets.  By relying on prior off-card
bytecode transformations, we simplify the bytecode verifier and reduce
its memory requirements to the point where it can be embedded on a
smart card, thus increasing significantly the security of post-issuance
downloading of applets on Java Cards.  This article describes the
on-card verification algorithm and the off-card code
transformations, and evaluates experimentally their impact on applet
code size.},
  xtopic = {security}
}
@inproceedings{Hirschowitz-Leroy-cbv-mixins-esop,
  author = {Tom Hirschowitz and Xavier Leroy},
  title = {Mixin modules in a call-by-value setting},
  booktitle = {ESOP 2002: European Symposium on Programming},
  series = {LNCS},
  publisher = {Springer},
  number = 2305,
  pages = {6--20},
  year = 2002,
  urllocal = {https://xavierleroy.org/publi/mixins-cbv-esop2002.pdf},
  urlpublisher = {http://www.springerlink.com/openurl.asp?genre=article&issn=0302-9743&volume=2305&spage=6},
  abstract = {The ML module system provides powerful parameterization facilities,
but lacks the ability to split mutually recursive definitions across
modules, and does not provide enough facilities for incremental
programming.  A promising approach to solve these issues is Ancona and
Zucca's mixin modules calculus CMS.  However, the straightforward
way to adapt it to ML fails, because it allows arbitrary recursive
definitions to appear at any time, which ML does not support.  In
this paper, we enrich CMS with a refined type system that controls
recursive definitions through the use of dependency graphs.  We then
develop a separate compilation scheme, directed by dependency graphs,
that translates mixin modules down to a CBV lambda-calculus extended
with a non-standard let rec construct.},
  xtopic = {modules},
  xranking = {leading}
}
@techreport{Hirschowitz-Leroy-Wells-mm-tr,
  author = {Tom Hirschowitz and Xavier Leroy and J. B. Wells},
  title = {A reduction semantics for call-by-value
                         mixin modules},
  institution = {INRIA},
  type = {Research report},
  year = 2003,
  number = 4682,
  hal = {inria-00071903},
  urlhal = {https://hal.science/inria-00071903},
  abstract = {Module systems are important for software engineering: they
facilitate code reuse without compromising the correctness of
programs. However, they still lack some flexibility: first, they do
not allow mutually recursive definitions to span module boundaries;
second, definitions inside modules are bound early, and cannot be
overridden later, as opposed to inheritance and overriding in
class-based object-oriented languages, which follow the late binding
semantics. This paper examines an alternative, hybrid modularization
concept called mixin modules. We develop a language of
call-by-value mixin modules with a reduction semantics, and a sound
type system for it, guaranteeing that programs will run correctly.},
  xtopic = {modules}
}
@techreport{Hirschowitz-Leroy-Wells-rec-tr,
  author = {Tom Hirschowitz and Xavier Leroy and J. B. Wells},
  title = {On the implementation of recursion in call-by-value 
                         functional languages},
  institution = {INRIA},
  type = {Research report},
  year = 2003,
  number = 4728,
  hal = {inria-00071858},
  urlhal = {https://hal.science/inria-00071858},
  abstract = {Extended version of \cite{Hirschowitz-Leroy-Wells-rec}.},
  xtopic = {modules}
}
@article{Leroy-Mauny-dynamics,
  author = {Xavier Leroy and Michel Mauny},
  title = {Dynamics in {ML}},
  journal = {Journal of Functional Programming},
  year = 1993,
  volume = 3,
  number = 4,
  pages = {431--463},
  urllocal = {https://xavierleroy.org/publi/dynamics-in-ML.pdf},
  urlpublisher = {https://doi.org/10.1017/S0956796800000848},
  doi = {10.1017/S0956796800000848},
  abstract = {Objects with dynamic types allow the integration of operations
that essentially require run-time type-checking into
statically-typed languages. This article presents two
extensions of the ML language with dynamics, based on our work
on the CAML implementation of ML, and discusses their
usefulness. The main novelty of this work is the combination
of dynamics with polymorphism.},
  xtopic = {types}
}
@inproceedings{Leroy-Mauny-dynamics1,
  author = {Xavier Leroy and Michel Mauny},
  title = {Dynamics in {ML}},
  booktitle = {FPCA 1991: {Functional Programming Languages and Computer Architecture}},
  series = {LNCS},
  publisher = {Springer},
  number = {523},
  year = {1991},
  pages = {406--426},
  urllocal = {https://xavierleroy.org/publi/dynamics-in-ML-fpca.pdf},
  abstract = {Preliminary version of \cite{Leroy-Mauny-dynamics}.},
  xtopic = {types}
}
@inproceedings{Leroy-poly-par-nom,
  author = {Xavier Leroy},
  title = {Polymorphism by name
                         for references and continuations},
  booktitle = {POPL 1993: 20th symposium Principles of Programming Languages},
  year = {1993},
  pages = {220--231},
  publisher = {ACM},
  urllocal = {https://xavierleroy.org/publi/polymorphism-by-name.pdf},
  urlpublisher = {https://doi.org/10.1145/158511.158632},
  doi = {10.1145/158511.158632},
  abstract = {This article investigates an ML-like language with by-name
semantics for polymorphism: polymorphic objects are not
evaluated once for all at generalization time, but
re-evaluated at each specialization. Unlike the standard ML
semantics, the by-name semantics works well with polymorphic
references and polymorphic continuations: the naive typing
rules for references and for continuations are sound with
respect to this semantics. Polymorphism by name leads to a
better integration of these imperative features into the ML
type discipline. Practical experience shows that it retains
most of the efficiency and predictability of polymorphism by
value.},
  xtopic = {types},
  xranking = {top}
}
@techreport{Leroy-repres,
  author = {Xavier Leroy},
  title = {Efficient data representation
                         in polymorphic languages},
  institution = {INRIA},
  type = {Research report},
  number = {1264},
  year = {1990},
  urllocal = {https://xavierleroy.org/publi/data-representation.pdf},
  abstract = {Languages with polymorphic types (e.g. ML) have traditionally been
implemented using Lisp-like data representations---everything has to
fit in one word, if necessary by being heap-allocated and handled
through a pointer. The reason is that, in contrast with conventional
statically-typed languages such as Pascal, it is not possible to
assign one unique type to each expression at compile-time, an absolute
requirement for using more efficient representations (e.g.
unallocated multi-word values). In this paper, we show how to take
advantage of the static polymorphic typing to mix correctly two styles
of data representation in the implementation of a polymorphic
language: specialized, efficient representations are used when types
are fully known at compile-time; uniform, Lisp-like representations
are used otherwise.},
  xtopic = {typecomp}
}
@inproceedings{Leroy-repres2,
  author = {Xavier Leroy},
  title = {Efficient data representation
                         in polymorphic languages},
  booktitle = {PLILP 1990: Programming Language Implementation
                         and Logic Programming},
  series = {LNCS},
  publisher = {Springer},
  number = {456},
  year = {1990},
  urllocal = {https://xavierleroy.org/publi/data-representation-plilp.pdf},
  abstract = {Short version of \cite{Leroy-repres}.},
  xtopic = {typecomp}
}
@incollection{Leroy-Rouaix-99,
  author = {Xavier Leroy and Fran\c{c}ois Rouaix},
  title = {Security properties of typed applets},
  booktitle = {Secure Internet Programming -- Security issues for
                         Mobile and Distributed Objects},
  publisher = {Springer},
  editor = {J. Vitek and C. Jensen},
  number = {1603},
  pages = {147--182},
  series = {LNCS},
  year = 1999,
  urllocal = {https://xavierleroy.org/publi/sip-typed-applets.pdf},
  urlpublisher = {https://doi.org/10.1007/3-540-48749-2_7},
  doi = {10.1007/3-540-48749-2_7},
  abstract = {This paper formalizes the folklore result that strongly-typed applets
are more secure than untyped ones.  We formulate and prove several
security properties that all well-typed applets possess, and identify
sufficient conditions for the applet execution environment to be
safe, such as procedural encapsulation, type abstraction, and
systematic type-based placement of run-time checks. These results are
a first step towards formal techniques for developing and validating
safe execution environments for applets.},
  xtopic = {security}
}
@inproceedings{Leroy-Rouaix-applets,
  author = {Xavier Leroy and Fran\c{c}ois Rouaix},
  title = {Security properties of typed applets},
  booktitle = {POPL 1998: 25th symposium Principles of Programming Languages},
  publisher = {ACM},
  year = 1998,
  pages = {391--403},
  urllocal = {https://xavierleroy.org/publi/typed-applets.pdf},
  urlpublisher = {https://doi.org/10.1145/268946.268979},
  doi = {10.1145/268946.268979},
  abstract = {Preliminary version of \cite{Leroy-Rouaix-99}.},
  xtopic = {security},
  xranking = {top}
}
@phdthesis{Leroy-these,
  author = {Xavier Leroy},
  title = {Typage polymorphe d'un langage algorithmique},
  type = {PhD thesis (in French)},
  school = {Universit\'e Paris 7},
  year = {1992},
  urllocal = {https://xavierleroy.org/publi/these-doctorat.pdf},
  abstract = {Static typing with polymorphic types, as in the ML
language, fits purely applicative languages perfectly, giving them
flexibility and expressiveness. But it does not extend naturally to
the main feature of algorithmic languages: in-place modification of
data structures. Similar typing difficulties arise with other
extensions of applicative languages: logical variables, inter-process
communication over channels, and the manipulation of continuations as
values. This work studies, in the setting of relational semantics,
two new approaches to the polymorphic typing of these non-applicative
constructs. The first relies on a restriction of the type
generalization operation (the notion of dangerous variables) and on a
finer typing of functional values (closure typing). The resulting
type system remains compatible with the applicative kernel of ML, and
turns out to be the most expressive among the type systems proposed
so far for ML with imperative features. The second approach relies on
adopting a ``by-name'' semantics for the constructs of polymorphism,
instead of the usual ``by-value'' semantics. The resulting language
departs from ML, but lends itself very simply to polymorphic typing.
Both approaches allow non-applicative features and polymorphic typing
to interact smoothly.
(See \cite{Leroy-thesis} for an English translation.)},
  xtopic = {types}
}
@techreport{Leroy-thesis,
  author = {Xavier Leroy},
  title = {Polymorphic typing of an algorithmic language},
  type = {Research report},
  number = {1778},
  institution = {INRIA},
  year = {1992},
  urllocal = {https://xavierleroy.org/publi/phd-thesis.pdf},
  hal = {inria-00077018},
  urlhal = {https://hal.science/inria-00077018},
  abstract = {The polymorphic type discipline, as in the ML language, fits
well within purely applicative languages, but does not extend
naturally to the main feature of algorithmic languages:
in-place update of data structures. Similar typing
difficulties arise with other extensions of applicative
languages: logical variables, communication channels,
continuation handling. This work studies (in the setting of
relational semantics) two new approaches to the polymorphic
typing of these non-applicative features. The first one relies
on a restriction of generalization over types (the notion of
dangerous variables), and on a refined typing of functional
values (closure typing). The resulting type system is
compatible with the ML core language, and is the most
expressive type system for ML with imperative features
proposed so far. The second approach relies on switching to
``by-name'' semantics for the constructs of polymorphism,
instead of the usual ``by-value'' semantics. The resulting
language differs from ML, but lends itself easily to
polymorphic typing. Both approaches smoothly integrate
non-applicative features and polymorphic typing.
(English translation of \cite{Leroy-these}.)},
  xtopic = {types}
}
@inproceedings{Leroy-unboxed,
  author = {Xavier Leroy},
  title = {Unboxed objects and polymorphic typing},
  booktitle = {POPL 1992: 19th symposium Principles of Programming Languages},
  publisher = {ACM},
  year = {1992},
  pages = {177--188},
  urllocal = {https://xavierleroy.org/publi/unboxed-polymorphism.pdf},
  urlpublisher = {https://doi.org/10.1145/143165.143205},
  doi = {10.1145/143165.143205},
  abstract = {This paper presents a program transformation that allows
languages with polymorphic typing (e.g. ML) to be implemented
with unboxed, multi-word data representations, more efficient
than the conventional boxed representations. The
transformation introduces coercions between various
representations, based on a typing derivation. A prototype ML
compiler utilizing this transformation demonstrates important
speedups.},
  xtopic = {typecomp},
  xranking = {top}
}
@inproceedings{Leroy-unboxing,
  author = {Xavier Leroy},
  title = {The effectiveness of type-based unboxing},
  booktitle = {TIC 1997: workshop Types in Compilation},
  publisher = {Technical report BCCS-97-03, Boston College,
                         Computer Science Department},
  year = 1997,
  month = jun,
  urllocal = {https://xavierleroy.org/publi/unboxing-tic97.pdf},
  abstract = {We compare the efficiency of type-based unboxing strategies with that
of simpler, untyped unboxing optimizations, building on our practical
experience with the Gallium and Objective Caml compilers. We find the
untyped optimizations to perform as well in the best case and
significantly better in the worst case.},
  xtopic = {typecomp},
  xranking = {workshop}
}
@techreport{Leroy-unix,
  author = {Xavier Leroy},
  title = {Programmation du syst\`eme {Unix} en {Caml} {Light}},
  type = {Technical report},
  number = {147},
  institution = {INRIA},
  year = {1992},
  urllocal = {https://xavierleroy.org/publi/unix-in-caml.pdf},
  hal = {inria-00070021},
  urlhal = {https://hal.science/inria-00070021},
  abstract = {This report is an introductory course on Unix systems
programming, with an emphasis on communication between processes. The
main novelty of this work is the use of the Caml Light language, a
dialect of the ML language, in place of the C language usually
associated with systems programming. This provides new perspectives
on both systems programming and the ML language.},
  xtopic = {caml}
}
@book{Leroy-Weis-manuel,
  author = {Xavier Leroy and Pierre Weis},
  title = {Manuel de r\'ef\'erence du langage {Caml}},
  publisher = {Inter\'Editions},
  year = {1993},
  pagetotal = {166},
  isbn = {2-7296-0492-8},
  urllocal = {http://caml.inria.fr/pub/distrib/books/manuel-cl.pdf},
  xtopic = {caml},
  abstract = {Written by two of the implementors of the Caml Light
 compiler, this book exhaustively describes all the
 constructs of the programming language and provides
 complete documentation of the Caml Light system}
}
@inproceedings{Leroy-Weis-refs,
  author = {Xavier Leroy and Pierre Weis},
  title = {Polymorphic type inference and assignment},
  booktitle = {POPL 1991: 18th symposium Principles of Programming Languages},
  publisher = {ACM},
  year = {1991},
  pages = {291--302},
  urllocal = {https://xavierleroy.org/publi/polymorphic-assignment.pdf},
  urlpublisher = {https://doi.org/10.1145/99583.99622},
  doi = {10.1145/99583.99622},
  abstract = {This paper presents a new approach to the polymorphic typing of data
accepting in-place modification in ML-like languages. This
approach is based on restrictions over type generalization,
and a refined typing of functions. The type system given here
leads to a better integration of imperative programming style
with the purely applicative kernel of ML.  In particular,
generic functions that allocate mutable data can safely be
given fully polymorphic types. We show the soundness of this
type system, and give a type reconstruction algorithm.},
  xtopic = {types},
  xranking = {top}
}
@techreport{Leroy-ZINC,
  author = {Xavier Leroy},
  title = {The {ZINC} experiment: an economical
                         implementation of the {ML} language},
  institution = {INRIA},
  type = {Technical report},
  number = {117},
  year = {1990},
  urllocal = {https://xavierleroy.org/publi/ZINC.pdf},
  abstract = {This report details the design and implementation of the ZINC
system.  This is an experimental implementation of the ML
language, which later evolved into the Caml Light system.
This system is strongly oriented toward separate compilation
and the production of small, standalone programs; type safety
is ensured by a Modula-2-like module system.  ZINC uses
simple, portable techniques, such as bytecode interpretation;
a sophisticated execution model helps counterbalance the
interpretation overhead.},
  xtopic = {caml}
}
@article{Pessaux-Leroy-exn,
  author = {Xavier Leroy and Fran\c{c}ois Pessaux},
  title = {Type-based analysis of uncaught exceptions},
  journal = {ACM Transactions on Programming Languages and Systems},
  pages = {340--377},
  volume = 22,
  number = 2,
  year = 2000,
  urllocal = {https://xavierleroy.org/publi/exceptions-toplas.pdf},
  urlpublisher = {https://doi.org/10.1145/349214.349230},
  doi = {10.1145/349214.349230},
  abstract = {This paper presents a program analysis to estimate uncaught exceptions
in ML programs.  This analysis relies on unification-based type inference
in a non-standard type system, using rows to approximate both the flow
of escaping exceptions (a la effect systems) and the flow of result
values (a la control-flow analyses).  The resulting analysis is
efficient and precise; in particular, arguments carried by
exceptions are accurately handled.},
  xtopic = {analysis}
}
@inproceedings{Pessaux-Leroy-exn-popl,
  author = {Fran\c{c}ois Pessaux and Xavier Leroy},
  title = {Type-based analysis of uncaught exceptions},
  booktitle = {POPL 1999: 26th symposium Principles of Programming Languages},
  publisher = {ACM},
  pages = {276--290},
  year = 1999,
  urllocal = {https://xavierleroy.org/publi/exceptions-popl.pdf},
  urlpublisher = {https://doi.org/10.1145/292540.292565},
  doi = {10.1145/292540.292565},
  abstract = {Preliminary version of \cite{Pessaux-Leroy-exn}.},
  xtopic = {analysis},
  xranking = {top}
}
@book{Weis-Leroy-Caml,
  author = {Pierre Weis and Xavier Leroy},
  title = {Le langage {Caml}},
  edition = {2},
  publisher = {Dunod},
  year = {1999},
  pagetotal = {370},
  urllocal = {http://caml.inria.fr/pub/distrib/books/llc.pdf},
  xtopic = {caml},
  isbn = {2-10-004383-8},
  abstract = {This book offers a simple and concrete introduction to
 programming in Caml. A genuine programming course, it
 introduces the mechanisms of the language progressively and
 shows them at work on the fundamental problems of
 programming. Beyond numerous introductory examples, this
 book details the design and implementation of
 six complete, realistic programs illustrating
 domains reputed to be difficult: compilation, type inference,
 automata, etc.}
}
@inproceedings{Calcagno-Taha-Huang-Leroy-03,
  author = {Cristiano Calcagno and Walid Taha and
                         Liwen Huang and Xavier Leroy},
  title = {Implementing multi-stage languages using {ASTs},
                         gensym, and reflection},
  booktitle = {GPCE 2003: Generative Programming and Component Engineering},
  year = 2003,
  series = {LNCS},
  number = 2830,
  publisher = {Springer},
  pages = {57--76},
  urllocal = {http://www.cs.rice.edu/~taha/publications/conference/gpce03b.pdf},
  urlpublisher = {https://doi.org/10.1007/978-3-540-39815-8_4},
  doi = {10.1007/978-3-540-39815-8_4},
  xtopic = {caml},
  abstract = {The paper addresses theoretical and practical aspects of
implementing multi-stage languages using abstract syntax trees (ASTs),
gensym, and reflection. We present an operational account of the
correctness of this approach, and report on our experience with a
bytecode compiler called MetaOCaml that is based on this
strategy. Current performance measurements reveal interesting
characteristics of the underlying OCaml compiler, and illustrate why
this strategy can be particularly useful for implementing
domain-specific languages in a typed, functional setting.}
}
@article{Pseudoknot-96,
  author = {P. H. Hartel and
                M. Feeley and
                M. Alt and
                L. Augustsson and
                P. Baumann and
                M. Beemster and
                E. Chailloux and
                C. H. Flood and
                W. Grieskamp and
                J. H. G.\ van Groningen and
                K. Hammond and
                B. Hausman and
                M. Y. Ivory and
                R. E. Jones and
                J. Kamperman and
                P. Lee and
                X. Leroy and
                R. D. Lins and
                S. Loosemore and
                N. R\"ojemo and
                M. Serrano and
                J.-P. Talpin and
                J. Thackray and
                S. Thomas and
                P. Walters and
                P. Weis and
                P. Wentworth},
  title = {Benchmarking implementations of functional languages with {``Pseudoknot''}, a float-intensive benchmark},
  journal = {Journal of Functional Programming},
  volume = {6},
  number = {4},
  pages = {621--655},
  year = {1996},
  doi = {10.1017/S0956796800001891},
  abstract = {Over 25 implementations of different functional languages are
benchmarked using the same program, a floating-point intensive
application taken from molecular biology. The principal aspects
studied are compile time and execution time for the various
implementations that were benchmarked. An important consideration is
how the program can be modified and tuned to obtain maximal
performance on each language implementation.},
  xtopic = {caml}
}
@proceedings{Proceedings-CPP-2015,
  editor = {Xavier Leroy and Alwen Tiu},
  title = {Proceedings of the 2015 Conference on Certified Programs and Proofs,
               {CPP} 2015, Mumbai, India, January 15-17, 2015},
  publisher = {ACM},
  year = {2015},
  urlpublisher = {http://dl.acm.org/citation.cfm?id=2676724},
  isbn = {1-4503-3296-5}
}
@proceedings{Proceedings-ML-2005,
  editor = {Nick Benton and Xavier Leroy},
  title = {{ML 2005}: Proceedings of the {ACM SIGPLAN} Workshop on {ML}},
  series = {ENTCS},
  number = {148(2)},
  year = {2006},
  urlpublisher = {http://dx.doi.org/10.1016/j.entcs.2005.11.037},
  doi = {10.1016/j.entcs.2005.11.037}
}
@proceedings{Proceedings-POPL-2004,
  editor = {Neil D. Jones and Xavier Leroy},
  title = {Proceedings of the 31st {ACM} {SIGPLAN-SIGACT} Symposium on Principles of Programming Languages, {POPL} 2004, Venice, Italy, January 14-16, 2004},
  publisher = {ACM},
  year = {2004},
  urlpublisher = {http://dl.acm.org/citation.cfm?id=964001},
  isbn = {1-58113-729-X}
}
@proceedings{Proceedings-ICFP-2001,
  editor = {Benjamin C. Pierce and Xavier Leroy},
  title = {Proceedings of the Sixth {ACM} {SIGPLAN} International Conference
               on Functional Programming, {ICFP} '01, Firenze (Florence), Italy,
               September 3-5, 2001},
  publisher = {ACM},
  year = {2001},
  isbn = {1-58113-415-0}
}
@proceedings{Proceedings-TIC-1998,
  editor = {Xavier Leroy and Atsushi Ohori},
  title = {Types in Compilation, Second International Workshop, {TIC} '98, Kyoto,
               Japan, March 25-27, 1998, Proceedings},
  series = {LNCS},
  number = {1473},
  publisher = {Springer},
  year = {1998},
  isbn = {3-540-64925-5}
}
@proceedings{Proceedings-Dechiffrements,
  title = {D\'{e}chiffrement(s): des hi\'{e}roglyphes \`{a} l'ADN},
  editor = {Charpin, Dominique and Leroy, Xavier},
  urlhal = {https://hal.science/hal-04210865},
  publisher = {{Odile Jacob}},
  series = {Colloque annuel du Coll\`{e}ge de France},
  year = {2023},
  month = sep,
  isbn = {978-2-415-00687-7}
}
@patent{Leroy-patent-BV,
  author = {Xavier Leroy},
  title = {Management protocol, method for verifying and transforming a downloaded programme fragment and corresponding systems / {Protocole} de gestion, procédé de vérification et de transformation d'un fragment de programme téléchargé et systèmes correspondants},
  holder = {{Trusted Logic}},
  number = {WO 2001 014958 A3},
  year = {2001},
  month = dec
}
@patent{Leroy-patent-refs,
  author = {Xavier Leroy and Patrice Hameau and Nicolas Regnault and Renaud Marlet},
  title = {Control of data access by dynamically verifying legal references / {Contrôle} d'accès aux données par vérification dynamique des références licites},
  holder = {{Trusted Logic}},
  number = {WO 2005 073827},
  year = {2005},
  month = aug
}
@patent{Leroy-patent-trace,
  author = {Dominique Bolignano and Xavier Leroy and Renaud Marlet},
  title = {Method for controlling program execution integrity by verifying execution trace prints / {Procédé} de contrôle d'intégrité de programmes par vérification d'empreintes de traces d'exécution},
  holder = {{Trusted Logic}},
  number = {WO 2005 073859},
  year = {2006},
  month = apr
}
