%                                                   -*- outline -*-
% Time-stamp: <Sat Aug 14 2010 00:15:00 Stardate: Stardate: [-28]3524.63 hwloidl> 
%
% My own publications.
% ---------------------------------------------------------------------------

% Journal, conference-series, and publisher abbreviations.
% Acronyms such as {ACM}/{IEEE} are braced so that styles which recase
% these strings keep the capitalisation intact (CACM/JACM/LFP/LICS
% already did this; POPL/TOPLAS/TSE are now made consistent).
@String{AI =     "Artificial Intelligence"}
@String{Acta =   "Acta Informatica"}
@String{CACM =   "Communications of the {ACM}"}
@String{CJ =     "Computer Journal"}
@String{EATCS =  "Bulletin of the European Association for Theoretical
                 Computer Science"}
@String{ESOP =   "European Symposium on Programming"}
@String{FPCA =   "International Conference on Functional Programming
                 Languages and Computer Architecture"}
@String{IFIP =   "IFIP World Congress Proceedings"}
@String{IPL =    "Information Processing Letters"}
@String{JACM =   "Journal of the {ACM}"}
@String{JCSS =   "Journal of Computer and System Sciences"}
@String{JFP =    "J.\ of Functional Programming"}
@String{JFLP =   "Journal of Functional and Logic Programming"}
@String{JPDC =   "Journal of Parallel and Distributed Computing"}
@String{JSC =    "Journal of Symbolic Computation"}
@String{LSC =    "Lisp and Symbolic Computation"}
@String{LFP =    "{ACM} Conference on Lisp and Functional Programming"}
@String{LICS =   "{IEEE} Symposium on Logic in Computer Science"}
@String{LNCS =   "LNCS"} %%% Lecture Notes in Computer Science"}
@String{MFCS =   "Mathematical Foundations of Computer Science"}
@String{MFPLS =  "Mathematical Foundations of Programming Language
                 Semantics"}
@String{NGC =    "New Generation Computing"}
@String{PEMC =   "Partial Evaluation and Mixed Computation"}
@String{PEPM =   "Partial Evaluation and Semantics-Based Program
                 Manipulation, New Haven, Connecticut. (Sigplan Notices,
                 vol. 26, no. 9, September 1991)"}
@String{PDO =    "Programs as Data Objects, Copenhagen, Denmark.
                 (Lecture Notes in Computer Science, vol. 217)"}
@String{POPL =   "{ACM} Symposium on Principles of Programming Languages"}
@String{PLDI =   "Programming Language Design and Implementation"}
@String{SCP =    "Science of Computer Programming"}
@String{SIGPLAN = "Sigplan Notices"}
@String{SMD =    "Soviet Mathematics Doklady"}
@String{CPE =    "Concurrency -- Practice and Experience"}
@String{SPE =    "Software -- Practice and Experience"}
@String{TCS =    "Theoretical Computer Science"}
@String{TOPLAS = "{ACM} Transactions on Programming Languages and
                 Systems"}
@String{TSE =    "{IEEE} Transactions on Software Engineering"}
% Publishers
@String{A-W =    "Addison-Wesley"}
@String{AP =     "Academic Press"}
@String{CSP =    "Computer Science Press"}
@String{CUP =    "Cambridge University Press"}
@String{JWS =    "John Wiley \& Sons"}
@String{MIT =    "MIT Press"}
@String{N-H =    "North-Holland"}
@String{OUP =    "Oxford University Press"}
@String{P-H =    "Prentice-Hall"}
@String{S-V =    "Springer"}
@String{SL =     "Studentlitteratur, Lund, Sweden"}
@String{WHF =    "W.H. Freeman"}
% Special for the PFP paper
@String{PPL =    "Parallel Processing Letters"}
@String{WSP =    "World Scientific Publishing"}
@String{INTELL = "Intellect"}
%% LMU period


% Marlow et al., Haskell Symposium 2010 -- redesign of evaluation strategies.
@InProceedings{Haskell10,
  author    = {S. Marlow and P. Maier and H-W. Loidl and M.K. Aswad and P. Trinder},
  title     = {{Seq no more: Better Strategies for Parallel Haskell}},
  booktitle = {Haskell'10 --- Haskell Symposium},
  year      = 2010,
  address   = {Baltimore MD, U.S.A.},
  month     = sep,
  publisher = {ACM Press},
  keywords  = {GpH, parallel Haskell, evaluation strategies},
  url       = {http://www.macs.hw.ac.uk/~hwloidl/publications/strategies10.pdf},
  abstract  = {
  We present a complete redesign of evaluation strategies, a key
  abstraction for specifying pure, deterministic parallelism in
  Haskell.  Our new formulation preserves the compositionality and
  modularity benefits of the original, while providing significant new
  benefits.  First, we introduce an
  evaluation-order monad to provide clearer, more generic,
  and more efficient specification of parallel evaluation.  Secondly,
  the new formulation resolves a subtle space management issue with
  the original strategies, allowing parallelism (sparks) to be
  preserved while reclaiming heap associated with superfluous
  parallelism.  Related to this, the new formulation provides far
  better support for speculative parallelism as the garbage collector
  now prunes unneeded speculation.  Finally, the new formulation
  provides improved compositionality: we can directly express
  parallelism embedded within lazy data structures, producing more
  compositional strategies, and our basic strategies are parametric in
  the coordination combinator, facilitating a richer set of
  parallelism combinators.

  We give measurements over a range of benchmarks demonstrating that
  the runtime overheads of the new formulation relative to the
  original are low, and the new strategies even yield slightly better
  speedups on average than the original strategies.},
}

% Brown et al., IFL 2010 draft proceedings -- CaSH, the Computer Algebra SHell.
@InProceedings{IFL10,
  author    = {C. Brown and H-W. Loidl and J. Berthold and K. Hammond},
  title     = {{Improving your CASH flow: The Computer Algebra SHell (Extended Abstract)}},
  booktitle = {{IFL'10 ---  Intl.\ Workshop on the Implementation of Functional Languages}},
  year      = 2010,
  month     = sep,
  note      = {Draft Proceedings},
  keywords  = {parallel applications, symbolic computation},
  url       = {http://www.cs.st-andrews.ac.uk/~hwloidl/SCIEnce/SymGrid-Par/ifl2010.pdf},
  abstract  = {
Some important challenges in the field of symbolic computation ---and functional programming--- are the transparent access to complex, 
mathematical software, the exchange of data between independent systems with specialised tasks and the exploitation of modern 
parallel hardware. One attempt to solve this problem is SymGrid-Par, a system for exploiting parallel hardware in the context of computer algebra. 
Specifically, SymGrid-Par provides an easy-to-use platform for parallel computation that connects several underlying computer algebra systems, communicating
through a standardised protocol for symbolic computation. 

In this paper we describe a new component of SymGrid-Par known as CaSH: the Computer Algebra SHell. CaSH is a system that
allows direct access to SymGrid-Par via GHCi. CaSH thus allows Haskell programmers to exploit high-performance parallel computations using a system designated
for solving problems in computer algebra; whilst still maintaining the purity and express-ability offered by the Haskell environment. We demonstrate access to both sequential and parallel services of SymGrid-Par.
For the latter we use parallel skeletons, implemented in the Haskell dialect of Eden; these skeletons are called from CaSH but exploit a computational algebra system known as
GAP to offload the mathematical complexity.},
}


% ISSAC demo-session extended abstract for SymGrid-Par.
% NOTE(review): corporate author is double-braced so BibTeX treats the
% whole phrase as a single indivisible name instead of parsing it as
% first="The SCIEnce" last="project".
@Article{SymGridParDemo,
  author   = {{The SCIEnce project}},
  title    = {{SymGrid-Par: Parallel Orchestration of Symbolic Computation Systems}},
  journal  = {{Communications of Computer Algebra}},
  year     = 2010,
  note     = {To appear},
  annote   = {Extended abstract for ISSAC demo session},
  keywords = {parallel applications, symbolic computation},
  url      = {http://www.cs.st-andrews.ac.uk/~hwloidl/SCIEnce/SymGrid-Par/demo.pdf},
}

% Jost/Hammond/Loidl/Hofmann, POPL 2010 -- amortised resource analysis
% for higher-order programs.
% doi holds the bare identifier (no resolver prefix); styles/URL
% packages add the https://doi.org/ prefix themselves.
% NOTE(review): the DOI suffix (1707801.1706327) does not follow the
% usual prefix.suffix pattern for this volume -- verify against the ACM
% Digital Library record.
@InProceedings{POPL10,
  author    = {Jost, S. and  Hammond, K. and Loidl, H-W.  and  Hofmann, M.},
  title     = {{Static Determination of Quantitative Resource Usage for Higher-Order Programs}},
  booktitle = {{POPL~'10 --- Symp. on Principles of Prog. Langs.}},
  address   = {Madrid, Spain},
  pages     = {223--236},
  year      = {2010},
  month     = jan,
  keywords  = {Resource analysis, Hume},
  doi       = {10.1145/1707801.1706327},
  url       = {http://www2.tcs.ifi.lmu.de/~hwloidl/publications/POPL10.pdf},
  abstract  = {
We describe a new automatic static analysis for
determining upper-bound functions on the use of quantitative
resources for strict, higher-order, polymorphic, recursive programs
dealing with possibly-aliased data.
Our analysis is a variant of Tarjan's manual amortised  cost analysis technique.
We use a type-based approach, exploiting linearity to allow inference, 
and place a new emphasis on the number of references to a data object.
The bounds we infer depend on the sizes of the various inputs to a program.
They thus expose the impact of specific inputs on the overall cost behaviour.

The key novel aspect of our work is that it
deals directly with polymorphic higher-order functions without requiring source-level
transformations that could alter resource usage.  We thus obtain safe
and accurate compile-time bounds. Our work is generic in that it deals with
a variety of quantitative resources.  We illustrate our approach with
reference to dynamic memory allocations/deallocations, stack usage, and
worst-case execution time, using metrics taken from a real implementation
on a simple micro-controller platform that is used in safety-critical
automotive applications.},
}

% Loidl/Jost, FOPARA 2009 -- improvements to the Hume resource analysis.
@InProceedings{FOPARA09,
  author    = {Loidl, H-W. and Jost, S.},
  title     = {{Improvements to a Resource Analysis for Hume}},
  booktitle = {{FOPARA~'09 --- Intl. Workshop on Foundational and Practical Aspects of Resource Analysis}},
  year      = 2009,
  series    = {LNCS~6324},
  address   = {Eindhoven, The Netherlands},
  month     = nov,
  keywords  = {Resource analysis, Hume},
  publisher = S-V,
  url       = {http://www2.tcs.ifi.lmu.de/~hwloidl/publications/FOPARA09.pdf},
  abstract  = {
The core of our resource analysis for the embedded systems language Hume is
a resource-generic, type-based inference engine that employs the concept
of amortised costs to statically infer resource bounds.
In this paper we present extensions and improvements of this resource analysis
in several ways.
We develop and assess a call count analysis for higher-order programs, as a specific
instance of our inference engine.
We address usability aspects in general and in particular discuss an improved presentation of the
inferred resource bounds together with the possibility of interactively
tuning these bounds.
Finally, we demonstrate improvements in the performance of our analysis.
},
}

% Trinder et al., FOPARA 2009 -- survey of resource analyses for
% parallel/distributed coordination.
@InProceedings{FOPARA09a,
  author    = {P.W. Trinder and M.I. Cole and H-W. Loidl and G.J. Michaelson},
  title     = {{Characterising Effective Resource Analyses for Parallel and Distributed Coordination}},
  booktitle = {{FOPARA~'09 --- Intl. Workshop on Foundational and Practical Aspects of Resource Analysis}},
  year      = 2009,
  series    = {LNCS~6324},
  address   = {Eindhoven, The Netherlands},
  month     = nov,
  keywords  = {Resource analysis},
  publisher = S-V,
  url       = {http://www2.tcs.ifi.lmu.de/~hwloidl/publications/ResAn_FOPARA09.pdf},
  abstract  = {
  An important application of resource analysis is to improve the
  performance of parallel and distributed programs. In this context key
  resources are time, space and communication. Given the spectrum of
  cost models and associated analysis techniques available, what combination
  should be selected for a specific parallel or distributed
  context?

  We address the question as follows. We outline a continuum of
  coordination cost models and a range of analysis techniques. We
  consider six representative parallel/distributed applications of
  resource analysis techniques, and aim to extract general principles
  governing why the combination of techniques is effective in its
  context.
},
}

% Jost et al., ECRTS 2009 work-in-progress session -- type-based WCET analysis.
@InProceedings{ECRTS09,
  author    = {Jost, S. and Loidl, H-W. and Scaife, N. and Hammond, K. and Michaelson, G. and Hofmann, M.},
  title     = {{Worst-Case Execution Time Analysis through Types}},
  booktitle = {{ECRTS'09 --- 21st Euromicro Conf.\ on Real-Time Systems}},
  pages     = {13--16},
  year      = 2009,
  address   = {Dublin, Ireland, July 1--3},
  publisher = {ACM},
  keywords  = {Resource analysis, Hume},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/ECRTS09.pdf},
  note      = {Work-in-Progress Session},
  abstract  = {
We construct a fully automatic static WCET analysis
suitable for real-time embedded systems applications
by augmenting a high-level static analysis technique (originally aimed at heap-space)
with a machine-level worst-case execution time tool.
We evaluate this approach by studying two typical
and realistic real-time control applications,
using a readily available commercial microcontroller.},
}

%%% Article{JHLH09,
%%%   author = 	 {S. Jost and K. Hammond and H-W. Loidl and M. Hofmann},
%%%   title = 	 {{``Carbon Credits'' for Resource-bounded Computations}},
%%%   journal = 	 {Higher-order and Symbolic Computation},
%%%   year = 	 2009,
%%%   note =	 {In preparation}
%%% }

% Jost et al., FM 2009 -- amortised analysis for resource-bounded computation.
@InProceedings{JHLH09,
  author    = {S. Jost and H-W. Loidl and K. Hammond and N. Scaife and M. Hofmann},
  title     = {{``Carbon Credits'' for Resource-bounded Computations using Amortised Analysis}},
  booktitle = {{FM09 --- 16th International Symposium on  Formal Methods}},
  series    = {LNCS~5850},
  pages     = {354--369},
  publisher = S-V,
  year      = 2009,
  address   = {Eindhoven, The Netherlands,  November 2--6},
  keywords  = {Resource analysis, Hume},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/AnalysisPaper.pdf},
  abstract  = {
Bounding resource usage is important for a number of areas, notably
real-time embedded systems and safety-critical systems.
In this paper, we present a fully automatic static type-based analysis
for inferring upper bounds on resource usage for
programs involving general algebraic datatypes and full recursion.
Our method can easily be used to bound any countable resource,
without needing to revisit proofs.
We apply the analysis to the important metrics of worst-case
execution time, stack- and heap-space usage. Our results from
several realistic embedded control applications
demonstrate good matches between our inferred bounds and
measured worst-case costs for heap and stack usage. For time usage
we infer good bounds for one application.  Where we obtain
less tight bounds, this is due to the use of software floating-point libraries.
},
}

%% InProceedings{LoBe09,
%%   author = 	 {H-W. Loidl and L. Beringer},
%%   title = 	 {{A Resource Logic for Hume}},
%%   booktitle =	 {{FM09 --- 16th International Symposium on  Formal Methods}},
%%   year =	 2009,
%%   address =	 {Eindhoven, The Netherlands,  November 2--6},
%%   note =	 {Submitted}
%% }

%% InProceedings{GMLHJ09,
%%   author = 	 {G. Grov and G. Michaelson and H-W. Loidl and C. Herrmann and S. Jost},
%%   title = 	 {{An Application of Hume Analysis to Imperative Programs with Pointers}},
%%   booktitle =	 {{TFP09 --- Trends in Functional Programing}},
%%   year =	 2009,
%%   address =	 {Komarno, Slovakia  June 2--4},
%%   note =	 {Submitted}
%% }

% Loidl et al., LADC 2009 -- PCC infrastructure for resource bounds.
% Fix: address contained the mojibake "Joćo Pessoa"; the city is
% Jo\~ao Pessoa, written with a LaTeX escape since this file is
% otherwise pure ASCII (classic-BibTeX safe).
@InProceedings{LMJB09,
  author    = {H-W. Loidl and K. MacKenzie and S. Jost and L. Beringer},
  title     = {{A Proof-carrying-code Infrastructure for Resources}},
  booktitle = {{LADC'09 --- Latin-American Symposium on Dependable Computing}},
  year      = 2009,
  address   = {Jo{\~a}o Pessoa, Brazil, Sep 1--4},
  keywords  = {PCC},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/PCC.pdf},
  abstract  = {
  This paper tackles the issue of increasing dependability
  of distributed systems in the presence of mobile code.
  To this end we present a complete Proof-carrying-code (PCC) infrastructure for
  independent and automatic certification of resource bounds of mobile JVM programs.
  This includes a certifying compiler for a high-level language, which
  produces a certificate of bounded heap consumption, and independent
  certificate validation, realised via proof-checking, on the code-consumer
  side. Thus, we are now in a position to automatically infer linear upper bounds on
  the heap consumption of a strict, first-order functional language, generate a
  certificate encoding a formal proof of such bounded heap consumption and
  independently validate this certificate at the consumer side by checking the
  certificate. This prevents mobile code from exhausting resources on the local machine.},
}

%% InProceedings{LoGr09,
%%   author = 	 {H-W. Loidl and G. Grov},
%%   title = 	 {{A Reasoning Infrastructure for the Embedded Systems Language Hume}},
%%   booktitle =	 {{TPHOL09 --- Intl. Conf. on Theorem Proving in Higher Order Logics}},
%%   year =	 2009,
%%   address =	 {Munich, Germany, August, 2009},
%%   note =	 {Submitted}
%% }

% Grov et al., SETP 2009 -- applying Hume analysis to imperative programs.
@InProceedings{GMJHHL09,
  author    = {G. Grov and G. Michaelson and S. Jost and C. Herrmann and K. Hammond and H-W. Loidl},
  title     = {{An Application of Hume Analysis to Imperative Programs}},
  booktitle = {{SETP09 --- Intl.  Conf.  on Software Engineering Theory and Practice}},
  year      = 2009,
  address   = {Orlando, FL, USA, July 13--16, 2009},
  keywords  = {Resource analysis, Hume},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/HumeMiniC.pdf},
}

% Al Zain et al., EUSIPCO 2009 -- SIMD vectorisation for Hume.
@InProceedings{ZGMHJL09,
  author    = {A. {Al Zain} and V. Gibson and G. Michaelson and K. Hammond and S. Jost and H-W. Loidl},
  title     = {{Towards Hume SIMD Vectorisation}},
  booktitle = {{EUSIPCO'09 --- European Signal Processing Conference}},
  year      = 2009,
  address   = {Glasgow, Scotland, August 24--28},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/eusipco2009_Hume.pdf},
  keywords  = {Hume},
}


% Bonenfant et al., WCET 2007 workshop.  ALTurl is an ignored field
% kept as an annotation (alternative Dagstuhl mirror of the paper).
@InProceedings{WCET07,
  author    = {A. Bonenfant and K. Hammond and C.A. Herrmann and S. Jost and H-W. Loidl and R. Pointon},
  title     = {{Automatic Amortised Worst-Case Execution Time Analysis}},
  booktitle = {{WCET'07 --- Intl Workshop on Worst-Case Execution Time Analysis}},
  pages     = {13--18},
  year      = 2007,
  address   = {Pisa, Italy},
  month     = jul,
  keywords  = {Resource analysis, Hume},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/WCET07.pdf},
  ALTurl    = {http://drops.dagstuhl.de/opus/volltexte/2007/1186},
}


% Hammond et al., WCET 2006 workshop -- verifiable resource bounds for
% real-time embedded systems (EmBounded project context).
@InProceedings{WCET06,
  author    = {K. Hammond and C. Ferdinand and R. Heckmann and R. Dyckhoff and M. Hoffmann and S. Jost and 
                  H-W. Loidl and G. Michaelson and R. Pointon and N. Scaife and J. Serot and A. Wallace},
  title     = {{Towards Formally Verifiable Resource Bounds for Real-Time Embedded Systems}},
  booktitle = {{WCET'06 --- Intl Workshop on Worst-Case Execution Time Analysis}},
  year      = 2006,
  address   = {Dresden, Germany},
  month     = jul,
  keywords  = {Resource analysis, Hume},
  url       = {http://drops.dagstuhl.de/opus/volltexte/2006/677},
  ALTurl    = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/WCET06.pdf},
  abstract  = {
This paper describes ongoing work aimed at the construction of formal
cost models and analyses to yield verifiable guarantees of resource usage
in the context of real-time embedded systems.  Our work is conducted in
terms of the domain-specific language Hume, a language that combines
functional programming for computations with finite-state
automata for specifying reactive systems.  We outline an approach in
which high-level information derived from source-code analysis can be
combined with worst-case execution time information obtained from
high quality abstract interpretation of low-level binary code.
},
}

% Book chapter on GpH in the "Process Algebra for Parallel and
% Distributed Processing" volume (title = book, chapter = chapter name).
% Fix: the author list used a comma between "K. Hammond" and
% "A.D. {Al Zain}"; BibTeX separates names only with " and ", so the
% comma made those two authors parse as a single malformed name.
@InBook{LTHZB08,
  author    = {H-W. Loidl and P.W. Trinder and K. Hammond and A.D. {Al Zain} and C. Baker-Finch},
  OPTeditor = {Michael Alexander and Bill Gardner},
  title     = {{Process Algebra for Parallel and Distributed Processing: Algebraic Languages in Specification-Based Software Development}},
  chapter   = {{Semi-Explicit Parallel Programming in a Purely Functional Style: GpH}},
  publisher = {Chapman and Hall},
  year      = 2008,
  keywords  = {GpH, parallel Haskell},
  abstract  = {
Declarative programming languages can play an important role in the process
of designing and implementing parallel systems. They bridge the gap between
a high-level  specification, with proven properties of  the overall system,
and the execution  of the system on real  hardware.  Efficiently exploiting
parallelism  on a wide  range of  architectures is  a challenging  task and
should  in our  view be  handled  by a  sophisticated runtime  environment.
Based on  this design philosophy  we have developed and  formalised Glasgow
parallel Haskell (GpH), and implemented  it as a conservative extension of
the Glasgow Haskell Compiler.

%The latter can  be as
%diverse as  a computational Grid composed of  high-performance clusters and
% multi-core machines.   

The high-level nature of declarative languages eases the task of mapping an
algebraic specification down to  executable code.  In fact, the operational
components   of   the   specification   can  already   be   considered   an
implementation, with the associated  properties acting as assertions to the
code. Based on a formal model  of the declarative language, the validity of
these properties can be established by manual proof, which works on a level
of detail  similar to the  specification language itself.  Many operational
aspects, usually  complicating a  proof of an  implementation, do  not come
into   the  picture   at   this  level.    Most  importantly,   unnecessary
sequentialisation of the code is avoided.

However, the goal of implicit  parallelism has proven an elusive one. Often
the automatically  generated threads are too fine-grained  to be efficient.
In  other  cases the  data-dependencies  between  expressions prohibit  the
generation  of a  sufficient amount  of  parallelism.  Thus,  we employ  an
approach of semi-explicit parallelism, where only potential parallelism has
to be annotated in a program, and all aspects of coordination are delegated
to the runtime environment.  A corresponding formal model, in the form of a
structured  operational  semantics,  handling  pools  of  realised  and  of
potential parallelism,  is used  to establish the  correctness of  the code
employing  semi-explicit parallelism.   The runtime  environment  itself is
capable of synchronising parallel threads, using automatic blocking on data
under evaluation,  and by simulating virtual shared  memory across networks
of machines.  Being  embedded into the optimised runtime  environment for a
sequential  language,  we  achieve  efficient  execution  of  a  high-level
language, close  to the  original specification language,  while minimising
the  programmer effort  in parallelising  the  code and  being scalable  to
large-scale applications that can be executed on heterogeneous networks and
computational Grids.
},
}

% Berthold/Al Zain/Loidl, PADL 2008 -- scheduling in the ArTCoP RTE.
@InProceedings{BZL08,
  author    = {J. Berthold and A. {Al Zain} and H-W. Loidl},
  title     = {{Scheduling Light-weight Parallelism in ArTCoP}},
  booktitle = {{PADL08: Tenth Intl. Symp. on Practical Aspects of Declarative Languages}},
  year      = 2008,
  address   = {San Francisco, USA},
  month     = jan,
  keywords  = {GpH, parallel Haskell},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/PADL08.pdf},
  abstract  = {
  We present the design and prototype implementation of the scheduling 
  component in ArTCoP (architecture transparent control
  of parallelism), a novel run-time environment (RTE) 
  for parallel execution of high-level languages.
  A key feature of ArTCoP is its support for deep process and memory
  hierarchies, shown in the scheduler by supporting light-weight threads.
  To realise a system with easily exchangeable components, the system
  defines a micro-kernel, providing basic infrastructure, such as 
  garbage collection. All complex RTE operations, including the handling of parallelism,
  are implemented at a separate system level.
  By choosing Concurrent Haskell as high-level system language,
  we obtain a prototype in the form of an executable specification
  that is easier to maintain and more flexible than conventional RTEs.
  We demonstrate the flexibility of this approach
  by presenting implementations of a scheduler for light-weight
  threads in ArTCoP, based on GHC Version 6.6.},
}

% Al Zain et al., IEEE TPDS 2008 -- GpH on computational Grids.
% Fix: doi now stores the bare identifier (no resolver-host prefix);
% bibliography styles and resolvers add https://doi.org/ themselves.
@Article{ZTML08,
  author    = {A. {Al Zain} and P. Trinder and G. Michaelson and H-W. Loidl},
  title     = {{Evaluating a High-Level Parallel Language (GpH) for Computational GRIDs}},
  journal   = {{IEEE Transactions on Parallel and Distributed Systems}},
  year      = 2008,
  volume    = 19,
  number    = 2,
  pages     = {219--233},
  month     = feb,
  keywords  = {GpH, parallel Haskell},
  doi       = {10.1109/TPDS.2007.70728},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/IEEE_TPDS.pdf},
  abstract  = {
  Computational Grids potentially offer low cost, readily available,
  and large-scale high-performance platforms. For the parallel
  execution of programs, however, computational Grids pose serious
  challenges: they are heterogeneous, and have hierarchical and often
  shared interconnects, with high and variable latencies between
  clusters.

  This paper investigates whether a programming language with
  high-level parallel coordination and a Distributed Shared Memory
  model (DSM) can deliver good, and scalable, performance on a range
  of computational Grid configurations. The high-level language,
  Glasgow parallel Haskell (GpH), abstracts over the architectural
  complexities of the computational Grid, and we have developed
  GridGUM2, a sophisticated grid-specific implementation of GpH, to
  produce the first high-level DSM parallel language implementation
  for computational Grids.

  We report a systematic performance evaluation of GridGUM2 on
  combinations of high/low and homo/hetero-geneous computational
  Grids. We measure the performance of a small set of kernel
  parallel programs representing a variety of application areas, two
  parallel paradigms, and ranges of communication degree and parallel
  irregularity. We investigate GridGUM2's performance scalability
  on medium-scale heterogeneous and high-latency computational
  Grids, and analyse the performance with respect to the program
  characteristics of communication frequency and degree of irregular
  parallelism.
},
}

% Al Zain et al., ICCS 2007 -- design of SymGrid-Par.
% Fixes: "{Al Zain}" braced as a compound surname (it previously parsed
% as first="A. Al", last="Zain", unlike every other entry in this file);
% the ee resolver URL replaced by a proper bare doi field.
% NOTE(review): the citation key contains '*', which some BibTeX
% toolchains reject; not renamed here because existing \cite{ZHT*07}
% calls would break -- consider a coordinated rename.
@InProceedings{ZHT*07,
  author    = {A. {Al Zain} and K. Hammond and P. Trinder and S. Linton and H-W. Loidl and M. Costanti},
  title     = {{SymGrid-Par: Designing a Framework for Executing Computational Algebra Systems on Computational Grids}},
  booktitle = {{ICCS07: International Conference on Computational Science}},
  pages     = {617--624},
  year      = 2007,
  series    = {LNCS~4488},
  publisher = S-V,
  address   = {Beijing, China},
  month     = may,
  keywords  = {parallel applications, symbolic computation},
  doi       = {10.1007/978-3-540-72586-2_90},
  url       = {http://www2.tcs.ifi.lmu.de/~hwloidl/publications/papp2007.pdf},
  abstract  = {
  SymGrid-Par is a new framework for executing large computer
  algebra problems on computational Grids. We present the design of
  SymGrid-Par, which supports multiple computer algebra
  packages, and hence provides the novel possibility of composing a
  system using components from different packages.
  Orchestration of the components on the Grid is provided by a
  Grid-enabled parallel Haskell (GpH).  We present a prototype
  implementation of a core component of SymGrid-Par, together with promising 
  measurements of two programs on a modest Grid to demonstrate the
  feasibility of our approach.},
}

% Aspinall et al., TCS 2007 -- program logic for resources (MRG line of work).
% Fix: doi stores the bare identifier (no http://dx.doi.org/ prefix).
@Article{AspinallBHLM:TCS2007,
  author    = {D. Aspinall and L. Beringer and M. Hofmann and H-W. Loidl and A. Momigliano},
  title     = {{A Program Logic for Resources}},
  journal   = {{Theoretical Computer Science}},
  year      = 2007,
  volume    = 389,
  number    = 3,
  pages     = {411--445},
  month     = dec,
  keywords  = {PCC},
  doi       = {10.1016/j.tcs.2007.09.003},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/TCS.pdf},
  abstract  = {
  We introduce a reasoning infrastructure for proving statements on
  resource consumption in a fragment of the Java Virtual
  Machine Language (JVML). The infrastructure is based on a small hierarchy
  of program logics, with increasing levels of abstraction: 
  at the top there is a type system for a high-level
  language that encodes resource consumption. 
  The infrastructure is designed to be used in 
  a proof-carrying code (PCC) scenario, where mobile programs
  can be equipped with formal evidence that they
  have predictable resource behaviour.

  This article presents the core logic in our infrastructure, a
  VDM-style program logic for partial correctness, which can make
  statements about resource consumption in a general form.
  We establish some important results for this logic, including
  soundness and completeness with respect to a resource-aware
  operational semantics for the JVML.  We also present a second
  logic built on top of the core logic, which is used to express
  termination; it is also shown to be sound and complete.  The entire
  infrastructure has been formalised in Isabelle/HOL, both to enhance
  confidence in the meta-theoretical results, and to provide a
  prototype implementation for PCC.  We give examples to show the
  usefulness of this approach, including proofs of resource bounds on
  code resulting from compiling high-level functional programs.
},
}


% Hofmann/Loidl/Beringer -- Marktoberdorf 2005 summer-school lecture notes.
@InBook{Mdorf,
  author    = {M. Hofmann and H-W. Loidl and L. Beringer},
  title     = {{Logical Aspects of Secure Computer Systems}},
  chapter   = {{Certification of Quantitative Properties of Programs}},
  publisher = {IOS Press},
  year      = 2005,
  keywords  = {PCC},
  note      = {Lecture notes of the Marktoberdorf Summer School, 2--13 Aug 2005},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/mdorf.pdf},
  abstract  = {
  In the context of mobile and global computing knowledge of quantitative properties
  of programs is particularly important. Here are some typical scenarios: 
  (1) A
  provider of distributed computational power may only be willing to offer this
  service upon receiving dependable guarantees about the required resource
  consumption.  
  (2) A user of a handheld device, wearable computer, or smart card
  might want to know that a downloaded application will definitely run within the
  limited amount of memory available.
  (3)  Third-party software updates for mobile
  phones, household appliances, or car electronics should come with a guarantee not
  to set system parameters beyond manufacturer-specified safe limits.  

  Requiring certificates of specified resource consumption will also help to prevent
  mobile agents from performing denial of service attacks using bona fide host
  environments as a portal.  These lecture notes describe how such quantitative
  resource-related properties can be inferred automatically using type systems and
  how the results of such analysis can be turned into unforgeable certificates using
  a proof-carrying code framework.},
}

% Rauber Du Bois/Trinder/Loidl, J.UCS 2006 -- strong mobility in mHaskell.
@Article{BTL06,
  author   = {A. Rauber {Du Bois} and P. Trinder and H-W. Loidl},
  title    = {{Strong Mobility in Mobile Haskell}},
  journal  = {{Journal of Universal Computer Science}},
  year     = 2006,
  volume   = 12,
  number   = 7,
  pages    = {868--884},
  keywords = {mobile Haskell},
  ee       = {http://www.jucs.org/jucs_12_7/strong_mobility_in_mobile},
  url      = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/strongm.pdf},
}

@Article{ZTLM06,
  author = 	 {A. {Al Zain} and P. Trinder and H-W. Loidl and G. Michaelson},
  title = 	 {{Managing Heterogeneity in a Grid Parallel Haskell}},
  journal = 	 {{Scalable Computing: Practice and Experience}},
  year = 	 2006,
  month =        sep,
  volume =	 7,
  number =	 3,
  pages =	 {9--25},
  note =	 {{Selected papers from Practical Aspects of High-level Parallel Programming, May 22-25, 2005, Atlanta, USA}},
  keywords =     {GpH, parallel Haskell},
  doi           = {http://dx.doi.org/10.1007/11428848_96},
  url =          {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/SCPE_7_3_02.pdf}
}



%% TFP'05 project-start paper for the EmBounded project.
%% (Fixed booktitle typo: "Programing" -> "Programming".)
@InProceedings{Embounded05,
  author = 	 {K. Hammond and R. Dyckhoff and C. Ferdinand and R. Heckmann and M. Hofmann and S. Jost and H-W. Loidl and G. Michaelson and R. Pointon and N. Scaife and J. S{\'e}rot and A. Wallace},
  title = 	 {{The Embounded project (project start paper)}},
  booktitle =	 {{TFP05 --- Trends in Functional Programming}},
  pages =	 {195--210},
  year =	 2007,
  address =	 {Tallinn, Estonia},
  month =	 sep,
  publisher =	 INTELL
}

%% TFP'05 project-evaluation paper for Mobile Resource Guarantees.
%% (Fixed booktitle typo: "Programing" -> "Programming".)
@InProceedings{SannellaHAGSBLMMS:TFP05,
  author = 	 {D. Sannella and M. Hofmann and D. Aspinall and S. Gilmore and I. Stark and L. Beringer and H-W. Loidl and K. MacKenzie and A. Momigliano and O. Shkaravska},
  title = 	 {{Mobile Resource Guarantees (project evaluation paper)}},
  booktitle =	 {{TFP05 --- Trends in Functional Programming}},
  pages =	 {211--226},
  year =	 2007,
  address =	 {Tallinn, Estonia},
  month =	 sep,
  keywords =     {PCC},
  publisher =	 INTELL,
  url = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/mrg-sum.pdf},
}

%% Marktoberdorf summer-school lecture notes on certifying quantitative properties.
@InProceedings{HLB05,
  author    = {M. Hofmann and H-W. Loidl and L. Beringer},
  title     = {{Certification of Quantitative Properties of Programs}},
  booktitle = {{Logical Aspects of Secure Computer Systems}},
  publisher = {IOS Press},
  year      = 2005,
  note      = {Marktoberdorf Summer School, Aug 2-13, 2005},
  keywords  = {PCC},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/mdorf.pdf},
}

%% JUCS article (extended SBLP'05 selection) on mHaskell.
@Article{BTL05a,
  author   = {A. Rauber {Du Bois} and P. Trinder and H-W. Loidl},
  title    = {{mHaskell: Mobile Computation in a Purely Functional Language}},
  journal  = {{Journal of Universal Computer Science}},
  year     = 2005,
  volume   = 11,
  number   = 7,
  pages    = {1234--1254},
  note     = {Selected papers from the SBLP'05: 9th Brazilian Symposium on Programming Languages, Recife, Brazil, May 23-25, 2005},
  keywords = {mobile Haskell},
  ee       = {http://www.jucs.org/jucs_11_7/mhaskell_mobile_computation_in},
  url      = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/jucs.ps.gz}
}

%% PPL article on mobility skeletons.
%% (DOI promoted from nonstandard `ee` URL to a bare `doi` field.)
@Article{BTL05,
  author = 	 {A. Rauber {Du Bois} and P. Trinder and H-W. Loidl},
  title = 	 {{Towards Mobility Skeletons}},
  journal = 	 {{Parallel Processing Letters}},
  year = 	 2005,
  volume =	 15,
  number =	 3,
  pages =	 {273--288},
  keywords =     {mobile Haskell},
  doi =          {10.1142/S0129626405002210},
  url =          {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/cmpp04.ps.gz}
}


%% ENTCS article (selected from PDMC'04) on parallel/symbolic model checking.
%% (DOI promoted from nonstandard `ee` URL to a bare `doi` field.)
@Article{ll-pdmc04,
  author = 	 {M. Lange and H-W. Loidl},
  title = 	 {{Parallel and Symbolic Model Checking for Fixpoint Logic with Chop}},
  journal =      {{Electronic Notes in Theoretical Computer Science}},
  volume =       128,
  number =       3,
  month =        apr,
  pages =        {125--138},
  note =	 {{Selected Papers from PDMC'04: Intl. Workshop on Parallel and Distributed Techniques in Verification, London, U.K, Sep 2004}},
  year =	 2005,
  keywords =     {parallel applications},
  doi =          {10.1016/j.entcs.2004.10.023},
  url = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/pdmc04.ps.gz}
}

%% TPHOLs'04 paper on a program logic for resource verification.
@InProceedings{ABHLM04,
  author    = {D. Aspinall and L. Beringer and M. Hofmann and H-W. Loidl and A. Momigliano},
  title     = {{A Program Logic for Resource Verification}},
  booktitle = {{TPHOL04: Intl. Conf. on Theorem Proving in Higher Order Logics, Park City, UT, USA, September, 2004}},
  pages     = {34--49},
  year      = 2004,
  month     = sep,
  series    = {LNCS~3223},
  publisher = S-V,
  keywords  = {PCC},
  ee        = {http://springerlink.metapress.com/openurl.asp?genre=article{\&}issn=0302-9743{\&}volume=3223{\&}spage=34},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/tphol04.ps.gz}
}

%% TFP'03 paper on the implementation of Mobile Haskell.
@InProceedings{BTL03,
  author    = {A. Rauber {Du Bois} and P. Trinder and H-W. Loidl},
  title     = {{Implementing Mobile Haskell}},
  booktitle = {{TFP03: Fourth Symposium on Trends in Functional Programming}},
  pages     = {79--94},
  year      = 2003,
  address   = {Edinburgh, Scotland},
  month     = sep,
  publisher = INTELL,
  keywords  = {mobile Haskell},
  url       = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/tfp03.ps.gz}
}

%% WFLP'03 paper on mobile computation in Haskell.
%% (Author list fixed: names must be separated by " and ", not commas,
%% and the compound surname "Du Bois" braced -- matches entry BTL03.)
@InProceedings{BTL03a,
  author = 	 {A. Rauber {Du Bois} and P. Trinder and H-W. Loidl},
  title = 	 {{Mobile Computation in Haskell}},
  booktitle =	 {{WFLP'03: 12th Int'l Workshop on Functional and (Constraint) Logic Programming}},
  year =	 2003,
  address =	 {Valencia, Spain},
  month =	 jun,
  keywords =     {mobile Haskell},
  url = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/wflp03.ps.gz}
}

%% Fellowship period

%%% Our book chapter

%% Chapter in the edited volume "Research Directions in Parallel Functional
%% Programming". Converted from @InBook (which expects a chapter *number* and
%% the same author for book and chapter) to @InCollection: the chapter title
%% goes in `title`, the book title in `booktitle`, and the book editors in
%% `editor` (previously disabled as ALTeditor).
@InCollection{TLH99,
  author =	 {Trinder, P.W. and Loidl, H-W. and Hammond, K.},
  editor =	 {Hammond, K. and Michaelson, G.},
  title = 	 {{Large-Scale Functional Applications}},
  booktitle = 	 {{Research Directions in Parallel Functional Programming}},
  publisher = 	 S-V,
  year = 	 1999,
  month =	 oct,
  pages =	 {399--426},
  keywords =     {GpH, parallel Haskell, parallel applications},
}

%%% %%% Journal articles

%% HOSC journal article comparing parallel functional languages.
%% NOTE(review): no `pages` field -- add when known.
@Article{HOSC03,
  author   = {Loidl, H-W. and {Rubio Diez}, F.  and Scaife, N.R. and  Hammond, K. and Klusik, U. and  Loogen, R. and  Michaelson, G.J. and  Horiguchi, S.  and {Pena Mari}, R. and Priebe, S.M. and  {Rebon Portillo}, A.J.  and Trinder, P.W.},
  title    = {{Comparing Parallel Functional Languages: Programming and Performance}},
  journal  = {Higher-order and Symbolic Computation},
  year     = 2003,
  volume   = 16,
  number   = 3,
  keywords = {GpH, parallel Haskell, parallel applications},
  url      = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/HOSC03.ps.gz},
}

%%% %$%cindex Our JFP survey paper

%% JFP survey article on parallel and distributed Haskells.
@Article{TLP01,
  author      = {Trinder, P.W. and Loidl, H-W. and Pointon, R.F.},
  title       = {{Parallel and Distributed Haskells}},
  journal     = {J.\ of Functional Programming},
  year        = 2002,
  month       = jul,
  volume      = 12,
  number      = {4\&5},
  pages       = {469--510},
  keywords    = {GpH, parallel Haskell, GdH, distributed Haskell},
  abstractURL = {http://www.dcs.glasgow.ac.uk/jfp/bibliography/References/trinderlp2002:469.html},
  url         = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/jfp01.ps.gz},
}

%$%cindex Our HLPP paper

%% PPL article (selected from HLPP'01) on granularity and data locality in GpH.
%% NOTE(review): no `pages` field -- add when known.
@Article{LTB01,
  author   = {Loidl, H-W. and Trinder, P.W. and Butz, C.},
  title    = {{Tuning Task Granularity and Data Locality of Data Parallel GpH Programs}},
  journal  = {Parallel Processing Letters},
  year     = 2001,
  month    = dec,
  volume   = 11,
  number   = 4,
  note     = {{Selected papers from HLPP'01 --- International Workshop on
High-level Parallel Programming and Applications, Orleans, France, 26-27 March, 2001}},
  keywords = {GpH, parallel Haskell},
  url      = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/hlpp01.ps.gz},
}

%$%cindex Our CPE paper

%% CPE journal article on engineering large parallel symbolic programs in GPH.
%% (Nonstandard `issue` field -- silently ignored by classic BibTeX -- renamed
%% to the standard `number` field.)
@Article{LTH*98,
  author = 	 {Loidl, H-W. and Trinder, P.W. and Hammond, K. and Junaidu, S.B. and Morgan, R.G. and {Peyton Jones}, S.L.},
  title = 	 {{Engineering Parallel Symbolic Programs in GPH}},
  journal = 	 CPE,
  year = 	 1999,
  volume =       {11},
  number =       {12},
  pages =        {701--752},
  OPTnote =	 {To appear},
  url =          {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/cpe.ps.gz},
  OPTannote =       {ToDo: put the paper on-line},
  keywords =     {GpH, parallel Haskell, parallel applications},
  abstract =     "We investigate the claim that functional languages offer low-cost
parallelism in the context of symbolic programs on modest parallel
architectures. In our investigation we present the first comparative study
of the construction of large applications in a parallel functional
language, in our case in Glasgow Parallel Haskell (GPH). The applications
cover a range of application areas, use several parallel programming
paradigms, and are measured on two very different parallel architectures.

On the applications level the most significant result is that we are able
to achieve modest wall-clock speedups (between factors of 2 and 10) over
the optimised sequential versions for all but one of the programs. Speedups
are obtained even for programs that were not written with the intention of
being parallelised. These gains are achieved with a relatively small
programmer-effort. One reason for the relative ease of parallelisation is
the use of evaluation strategies, a new parallel programming technique that
separates the algorithm from the coordination of parallel behaviour.

On the language level we show that the combination of lazy and parallel
evaluation is useful for achieving a high level of abstraction. In
particular we can describe top-level parallelism, and also preserve module
abstraction by describing parallelism over the data structures provided at
the module interface (``data-oriented parallelism'').  Furthermore, we find
that the determinism of the language is helpful, as is the largely-implicit
nature of parallelism in GPH."
}

%%% %%% Conference papers

%% WFLP'03 short paper sketching a design for Mobile Haskell.
%% (Fixed truncated initial: "Trinder, P.W" -> "Trinder, P.W.".)
@InProceedings{mhaskell,
  author       = {{Du Bois}, A.R. and Trinder, P.W. and Loidl, H-W.},
  title        = {{Towards a Mobile Haskell}},
  year         = {2003},
  address      = {Valencia, Spain},
  booktitle    = {{WFLP 2003 --- Intl.\ Workshop on Functional and (Constraint) Logic Programming}},
  keywords =     {mobile Haskell},
  pages        = {113--116}
}

%% CCGrid/DSM'02 paper on virtual shared memory performance of GUM.
@InProceedings{DSM02,
  author    = {Loidl, H-W.},
  title     = {{The Virtual Shared Memory Performance of a Parallel Graph Reducer}},
  booktitle = {{CCGrid/DSM 2002 --- Intl.\  Symp.\  on Cluster Computing and the Grid}},
  OPTeditor = {Bal, H. and L{\"o}hr, K-P. and Reinefeld, A.},
  address   = {Berlin, Germany, May~21--24},
  OPTmonth  = may,
  year      = 2002,
  publisher = {IEEE Press},
  pages     = {311--318},
  OPTnote   = {{Organised with CCGrid 2002 --- International Symposium on Cluster Computing and the Grid}},
  keywords  = {GpH, parallel Haskell},
  url       = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/dsm02.ps.gz},
}

%% SBAC-PAD'02 paper on declarative parallel bottom-avoiding choice.
@InProceedings{amb,
  author    = {{Du Bois}, {A.R.} and Pointon, R. and Loidl, H-W. and Trinder, P.},
  title     = {{Implementing Declarative Parallel Bottom-Avoiding Choice}},
  booktitle = {SBAC-PAD 2002 --- Symp.\ on Computer Architecture and High Performance Computing},
  year      = 2002,
  pages     = {82--92},
  address   = {Vitoria, Brazil},
  month     = oct,
  keywords  = {parallel Haskell},
  url       = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/pad02.ps.gz}
}

%% Euro-Par 2000 paper on multi-architecture performance of GpH.
%% (Fixed truncated initial "S.L" -> "S.L."; DOI stored bare, without
%% the http://dx.doi.org/ resolver prefix.)
@InProceedings{EuroPar00,
  author = 	 {Trinder, P.W. and Loidl, H-W. and {Barry Jr.}, E. and Hammond, K. and Klusik, U. and {Peyton Jones}, S.L. and  {Reb{\'o}n Portillo}, A.J.},
  title = 	 {{The Multi-Architecture Performance of the Parallel Functional Language GpH}},
  booktitle = 	 {{Euro-Par 2000 --- Parallel Processing}},
  year =	 2000,
  volume =       1900,
  series =	 {LNCS},
  pages =        {739--743},
  address =	 {Munich, Germany},
  month =	 aug,
  publisher =	 S-V,
  doi =          {10.1007/3-540-44520-X_101},
  url =          {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/europar00.ps.gz},
  keywords =     {GpH, parallel Haskell},
  abstract =     "In principle, functional languages promise straightforward
architecture-independent parallelism, because of their high level
description of parallelism, dynamic management of parallelism and
deterministic semantics.  However, these language features come at the
expense of a sophisticated compiler and/or runtime-system. The problem
we address is whether such an elaborate  system can deliver
acceptable performance on a variety of parallel architectures.  In
particular we report performance measurements for the GUM
runtime-system on eight parallel architectures, including massively
parallel, distributed-memory, shared-memory and workstation networks.",
}

%%% %%% Workshop papers

%% IFL'02 paper on cost analysis via automatic size and time inference.
%% (Initials corrected to "A.J." for Reb\'on Portillo, matching entries
%% EuroPar00 and LTH*98 in this file -- verify against the published paper.)
@InProceedings{Gran,
  author = 	 {{Reb{\'o}n Portillo}, A.J. and  Hammond, K. and Loidl, H-W. and Vasconcelos, P.},
  title = 	 {{Cost Analysis using Automatic Size and Time Inference}},
  booktitle =	 {{IFL'02 ---  Intl.\ Workshop on the Implementation of Functional Languages}},
  year =	 2002,
  series =	 {LNCS},
  volume =       {2670},
  pages =        {232--247},
  address =	 {Madrid, Spain},
  month =	 sep,
  publisher =	 S-V,
  keywords =     {Resource analysis},
  url =	         {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/Analysis-IFL02.ps.gz},
}

%% IFL'02 paper on thread migration in the GUM parallel graph reducer.
@InProceedings{TM,
  author    = {{Du Bois}, A.R. and Loidl, H-W. and Trinder, P.},
  title     = {{Thread Migration in a Parallel Graph Reducer}},
  booktitle = {{IFL'02 ---  Intl.\ Workshop on the Implementation of Functional Languages}},
  year      = 2002,
  series    = {LNCS},
  volume    = {2670},
  pages     = {199--214},
  address   = {Madrid, Spain},
  month     = sep,
  publisher = S-V,
  keywords  = {GpH, parallel Haskell, GUM},
  url       = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/Migration-IFL02.ps.gz},
}

%% SFP'01 paper on load balancing in the GUM parallel graph reducer.
@InProceedings{SFP01-GUM,
  author     = {Loidl, H-W.},
  title      = {{Load Balancing in a Parallel Graph Reducer}},
  booktitle  = {{SFP'01 --- Scottish Functional Programming Workshop}},
  series     = {Trends in Functional Programming},
  volume     = {3},
  editor     = {Hammond, K. and Curtis, S.},
  pages      = {63--74},
  publisher  = INTELL,
  address    = {Bristol, UK},
  OPTaddress = {Univ of Stirling, Scotland},
  year       = {2001},
  keywords   = {GpH, parallel Haskell, GUM},
  url        = {http://www.macs.hw.ac.uk/~dsg/gph/papers/drafts/sfp01-gum.ps.gz},
}


%% EUROCAST'01 paper comparing functional and OO distributed languages.
%% (DOI stored bare, without the http://dx.doi.org/ resolver prefix.)
@InProceedings{EUROCAST01,
  author = 	 {R.F. Pointon and S.M. Priebe and H-W. Loidl and R. Loogen and P.W. Trinder},
  title = 	 {{Functional vs Object-Oriented Distributed Languages}},
  booktitle = {{EUROCAST'01 --- Intl.\ Conf.\ on Computer Aided Systems Theory, Formal Methods and Tools for Computer Science}},
  pages = 	 {257--260},
  year = 	 2001,
  series = 	 {LNCS~2178},
  address = 	 {Palmas de Gran Canaria, Spain},
  publisher = S-V,
  keywords =     {GdH, distributed Haskell},
  doi = {10.1007/3-540-45654-6_49},
  url = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/eurocast01.ps.gz},
}

%% SFP'00 paper comparing GpH and Eden on a Beowulf cluster.
@InProceedings{LKH*00,
  author = 	 {Loidl, H-W. and Klusik, U. and Hammond, K. and Loogen, R. and Trinder, P.W.},
  title = 	 {{GpH and Eden: Comparing Two Parallel Functional Languages on a Beowulf Cluster}},
  booktitle = 	 {{SFP'00 --- Scottish Functional Programming Workshop}},
  year =	 2000,
  address =	 {University of St Andrews, Scotland},
  month =	 jul,
  series =       {Trends in Functional Programming},
  volume =       2,
  publisher =    INTELL,
  pages =        {39--52},
  keywords =     {GpH, parallel Haskell},
  url =          {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/sfp00.ps.gz},
  abstract =     "We investigate two similar but contrasting parallel functional language
designs: Eden and GpH.  Both languages use the non-strict functional
language Haskell as a core expression language, both their
implementations are based on the same host sequential implementation ---
the high performance Glasgow Haskell Compiler (GHC), and both
implementations are available on the same distributed architecture ---
the St Andrews Beowulf cluster.  This allows an exceptionally pure
comparison of the language design characteristics and their impact on
parallel performance.

The comparison is illustrated by two parallel benchmarks which expose
differences in the communication, process creation, and work distribution
mechanisms employed by Eden and GpH.  Our results show that the
explicit process model favoured by Eden gives good parallel performance
for coarse-grained applications running on the Beowulf cluster.  In
comparison, the implicit process model used in GpH gives poorer
absolute speedup for this class of application on this architecture.
Further work is needed to verify the reasons for this difference
in performance and to extend these results to other architectures."
}

%% SFP'00 paper on runtime-system-level fault tolerance for GdH.
@InProceedings{TPL00,
  author = 	 {Trinder, P.W. and Pointon, R.F. and Loidl, H-W.},
  title = 	 {{Runtime System Level Fault Tolerance for a Distributed Functional Language}},
  booktitle = 	 {{SFP'00 --- Scottish Functional Programming Workshop}},
  year =	 2000,
  address =	 {University of St Andrews, Scotland},
  month =	 jul,
  pages =	 {103--114},
  series =       {Trends in Functional Programming},
  volume =       2,
  publisher =    INTELL,
  keywords =     {GdH, distributed Haskell, GUM},
  url =          {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/sfp00a.ps.gz},
  abstract =     "Functional languages potentially offer benefits for distributed fault
tolerance: many computations are pure, and hence have no side-effects
to be reversed during error recovery; moreover functional languages
have a high-level runtime system (RTS) where computations and data are
readily manipulated. We propose a new RTS level of fault tolerance for
distributed functional languages, and outline a design for its
implementation for the GdH language. The design distinguishes
between pure and impure computations: impure computations must be
recovered using conventional exception-based techniques, but the RTS
attempts implicit recovery of pure computations."
}


%% IFL'00 paper on the design and implementation of GdH.
%% (DOI stored bare, without the http://dx.doi.org/ resolver prefix.)
@InProceedings{PTL00,
  author = 	 {Pointon, R.F. and Trinder, P.W. and Loidl, H-W.},
  title = 	 {{The Design and Implementation of GdH: a Distributed Functional Language}},
  booktitle = 	 {{IFL'00  --- International Workshop on the Implementation of
                  Functional Languages}},
  year =	 2000,
  series =	 LNCS,
  volume =       {2011},
  address =	 {RWTH Aachen, Germany},
  month =	 sep,
  publisher =	 {S-V},
  pages =        {53--70},
  keywords =     {GdH, distributed Haskell},
  doi =          {10.1007/3-540-45361-X_4},
  url =	         {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/ifl00.ps.gz},
}

%%% %%% Technical reports
%%% %$%cindex Our HasPar paper

%%% Article{HKL*00,
%%%   author = 	 {Hammond, K. and King, D.J. and Loidl, H-W. and {Reb\'{o}n Portillo}, \'{A}.J. and Trinder, P.W.},
%%%   title = 	 {{The HasPar Performance Evaluation Suite for {\sc GpH}: a Parallel Non-Strict Functional Language}},
%%%   journal = 	 SPE,
%%%   month =        feb,
%%%   year = 	 2000,
%%%   note =	 {Submitted for publication},
%%%   abstract =     "The ultimate purpose of parallel computation is to improve performance by
%%% exploiting hardware duplication.  In order to achieve this improvement,
%%% it is essential to have a good understanding of real parallel behaviour.
%%% This paper introduces the HasPar integrated suite of
%%% performance evaluation tools for Glasgow Parallel Haskell (GpH), a
%%% high-performance parallel non-strict functional language.  This suite provides a
%%% framework for assessing and improving parallel program performance that
%%% has been used successfully on a number of large functional programs.

%%% The HasPar suite includes both idealised and realistic simulators
%%% for GpH.  It also incorporates an instrumented parallel
%%% implementation that can be used on a range of architectures including
%%% both tightly-coupled multiprocessors and loosely-coupled networks of
%%% workstations. An important feature of the tools is that they allow
%%% costs to be attributed to the parallel program source
%%% using either static or dynamic cost attribution mechanisms, as
%%% appropriate.  The resulting performance profiles can be visualised in
%%% a number of different ways, as illustrated in this paper.",
%%% }

%%% InProceedings{Loid99a,
%%%   author = 	 {Loidl, H-W.},
%%%   title = 	 {{Making GUM more Flexible}},
%%%   booktitle = 	 {SFP99 --- Scottish Functional Programming Workshop, University of Stirling, Aug 29 -- Sep 1},
%%%   year =	 1999,
%%%   annote =	 {Draft Proceedings},
%%% }

%%% % ---------------------------------------------------------------------------

%%% %%% PhD period

%% PhD thesis (Glasgow, 1998) on granularity in parallel functional programming.
%% (Two fields were both named ALTurl -- a duplicate-field lint error; the
%% second is renamed ALTurl2. Both are nonstandard and ignored by BibTeX.)
@PhdThesis{loidl-thesis,
  author =   {Loidl, H-W.},
  title =    {{Granularity in Large-Scale Parallel Functional Programming}},
  school =   {Dept.\ of Computing Science, Univ.\ of Glasgow},
  year =     1998,
  month =    mar,
  url =          {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/loidl-thesis.ps.gz},
  ALTurl = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/Glasgow/PhD.pdf},
  ALTurl2 = {http://www.macs.hw.ac.uk/~hwloidl/publications/Glasgow/PhD.ps.gz},
  abstract =     "This thesis demonstrates how to reduce the runtime of large non-strict
  functional programs using parallel evaluation. The parallelisation of
  several  programs  shows the importance of granularity,
  i.e. the computation costs of program expressions. The aspect of
  granularity is studied both on a practical level, by presenting and
  measuring runtime granularity improvement mechanisms, and at a more 
  formal level, by devising a static granularity analysis.

  By parallelising several large functional programs this thesis
  demonstrates for the first time the advantages of combining lazy and
  parallel evaluation on a large scale: laziness aids modularity, while
  parallelism reduces runtime.  One of the parallel programs is the
  Lolita system
  which, with more than
  47,000 lines of code, is the largest existing parallel non-strict functional program.
  A new mechanism for parallel programming, evaluation strategies, to which
  this thesis contributes, is shown to be useful in this parallelisation.
  Evaluation strategies simplify parallel programming by separating
  algorithmic code from code specifying dynamic behaviour. For large programs 
  the abstraction provided by functions 
  is maintained by using a data-oriented style of
  parallelism, which defines parallelism over intermediate data
  structures rather than inside the functions.

  A highly parameterised simulator, GranSim, has been constructed
  collaboratively and is discussed in detail in this thesis. GranSim is a
  tool for architecture-independent parallelisation and  a testbed
  for implementing runtime-system features of the parallel graph reduction
  model. By providing an idealised as well as an accurate model of the
  underlying parallel machine, GranSim has proven to be an essential part
  of an integrated parallel software engineering environment. Several
  parallel runtime-system features, such as granularity improvement
  mechanisms, have been tested via GranSim.  It is publicly available and
  in active use at several universities worldwide.

  In order to provide granularity information
  this thesis presents an inference-based static granularity analysis.
  This analysis combines two existing analyses, one for cost and one 
  for size information. It determines an upper bound for
  the computation costs of evaluating an expression in a simple strict
  higher-order language. By exposing recurrences during cost reconstruction
  and using a library of recurrences and their closed forms,
  it is  possible to infer the  costs  for some recursive functions. 
  The possible
  performance improvements are assessed by measuring the parallel
  performance of a hand-analysed and annotated program."
}

%% JFP article introducing evaluation strategies ("Algorithm + Strategy = Parallelism").
%% (month now uses the standard macro `jan` instead of the literal {January};
%% DOI stored bare; duplicate ALTurl field renamed ALTurl2.)
@Article{Strategies,
  author =   {Trinder, P.W. and Hammond, K. and Loidl, H-W. and {Peyton Jones}, S.L.},
  title =    {{Algorithm + Strategy = Parallelism}},
  journal =      {J.\ of Functional Programming},
  year =     {1998},
  OPTkey =   {},
  volume =   {8},
  number =   {1},
  pages =    {23--60},
  month =    jan,
  keywords =     {GpH, parallel Haskell, evaluation strategies},
  doi =  {10.1017/S0956796897002967},
  url = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/strategies.ps.gz},
  ALTurl = {http://www.macs.hw.ac.uk/~dsg/gph/papers/html/Strategies/strategies.html},
  ALTurl2 = {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/Glasgow/strategies.ps.gz},
  abstract =     "
The process of  writing large parallel programs is  complicated by the need
to specify both the parallel  behaviour of the   program and the  algorithm
that is to be used to compute its  result. This paper introduces evaluation
strategies,  lazy   higher-order functions     that control  the   parallel
evaluation of non-strict functional languages. Using evaluation strategies,
it  is  possible to achieve  a  clean   separation between algorithmic  and
behavioural code. The  result is   enhanced  clarity and shorter   parallel
programs.

Evaluation strategies are a very general concept: this paper shows how they
can be used to  model a wide range  of commonly used programming paradigms,
including  divide-and-conquer, pipeline   parallelism,    producer/consumer
parallelism, and data-oriented   parallelism.  Because they are   based  on
unrestricted  higher-order  functions,  they   can also capture   irregular
parallel structures.

Evaluation strategies  are not   just  of theoretical interest:   they have
evolved out of our experience in parallelising several large-scale parallel
applications, where they have  proved invaluable in  helping to manage  the
complexities of parallel behaviour.   These applications are  described  in
detail here. The largest application we have studied  to date, Lolita, is a
60,000 line  natural language parser. Initial  results  show that for these
programs  we can achieve  acceptable  parallel performance, while incurring
minimal overhead for using evaluation strategies."
}

%% IFL'97 paper on engineering large parallel functional programs.
%% (DOI stored bare, without the http://dx.doi.org/ resolver prefix.)
@InProceedings{LoTr97,
  author =   {Loidl, H-W. and Trinder, P.W.},
  title =    {{Engineering Large Parallel Functional Programs}},
  booktitle =    {IFL '97 --- Intl.\ Workshop on the Implementation  of Functional Languages 1997},
  pages =    {178--197},
  year =     1997,
  volume =   1467,
  series =   {LNCS},
  address =  {St Andrews, Scotland, Sep 10--12},
  OPTmonth =    {September},
  publisher = S-V,
  keywords =     {GpH, parallel Haskell, parallel applications},
  doi = {10.1007/BFb0055431},
  url = {http://www.macs.hw.ac.uk/~hwloidl/publications/Glasgow/IFL97.ps.gz},
  abstract =     "The design and implementation of useful programming languages, whether
  sequential or parallel, should be driven by large, realistic
  applications.  In constructing several medium- and large-scale programs
  in Glasgow Parallel Haskell, GPH, a parallel extension of Haskell,
  the group at Glasgow has investigated several important engineering
  issues:
  \begin{itemize}
  \item {\it Real Application Parallelism.} The programs achieve good
    wall-clock speedups and acceptable scale-up on both a shared-memory and
    a distributed memory machine. The programs typify a number of
    application areas and use a number of different parallel paradigms,
    e.g. pipelining or divide-and-conquer, often combining several
    paradigms in a single program.
  \item {\it Language Issues.} Although the largely implicit parallelism in
    GPH is a satisfactory programming model in general the base
    constructs for introducing and controlling parallelism tend to
    obfuscate the semantics of large programs.  As a result we developed
    evaluation strategies, a more abstract, and systematic mechanism
    for introducing and controlling parallelism. 
  \item {\it Engineering Environment.} The development and performance
    tuning of these programs emphasised the importance of an integrated
    engineering environment. In the process we have refined components of
    this environment like the simulator, the runtime system, and the
    profiling tools.
\end{itemize}"
}

%% IFL'97 paper on parallelising the Lolita natural-language system.
%% (DOI stored bare, without the http://dx.doi.org/ resolver prefix.)
@InProceedings{Lolita,
  author =   {Loidl, Hans-Wolfgang and Morgan, Richard and Trinder, Phil W. and Poria, Sanjay and Cooper, Chris and Peyton Jones, Simon L. and Garigliano, Roberto},
  title =    {{Parallelising a Large Functional Program Or: Keeping LOLITA Busy}},
  booktitle =    {IFL '97 --- Intl.\ Workshop on the Implementation  of Functional Languages 1997},
  pages =    {198--213},
  year =     {1997},
  volume =   {1467},
  series =   {LNCS},
  address =  {St Andrews, Scotland, Sep 10--12},
  OPTmonth =    {September},
  publisher = S-V,
  keywords =     {GpH, parallel Haskell, parallel applications},
  doi = {10.1007/BFb0055432},
  url = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/lolita.ps.gz},
  abstract =     "In this paper we report on the ongoing parallelisation of Lolita,
a natural language engineering system. Although Lolita currently exhibits only modest
parallelism, we believe that it is the largest parallel functional program
ever, comprising more than 47,000 lines of Haskell. Lolita has the
following interesting features common to real world applications of lazy languages:
\begin{itemize}
\item the code was not specifically designed for parallelism; 
\item laziness is essential for efficiency in Lolita; 
\item Lolita interfaces to data structures outside the Haskell heap,
  using a foreign language interface; 
\item Lolita was not written by those most closely involved in the
  parallelisation.
\end{itemize}

Our expectations in parallelising the program were to achieve moderate
speedups with small changes in the code.  To date speedups of up to 2.4
have been achieved for Lolita running under a realistic simulation of our
4~processor shared-memory target machine. Most notably, the parallelism is
achieved with a very small number of changes to, and without requiring an
understanding of most of the application.  On the Sun SPARCserver target
machine wall-clock speedup is currently limited by physical memory
availability."
}

%% Glasgow FP Workshop '97 paper: the LinSolv case study.
%% (Field name "AlTurl" normalised to "ALTurl", the spelling used by the
%% other entries in this file; both forms are ignored by BibTeX.)
@InProceedings{Loid97a,
  author = 	 {Loidl, H-W.},
  title = 	 {{LinSolv: a Case Study in Strategic Parallelism}},
  year =         1997,
  booktitle =	 {Glasgow Workshop on Functional Programming},
  address =	 {Ullapool, Scotland, September 15--17},
  keywords =     {GpH, parallel Haskell, parallel applications},
  ALTurl =       {http://www.dcs.gla.ac.uk/fp/workshops/fpw97/Loidl.ps},
  url =          {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/linsolv.ps.gz},
}

%% Glasgow FP Workshop '97 paper on parallel cost centre profiling.
@InProceedings{HLT97,
  author    = {Hammond, K. and Loidl, H-W. and Trinder, P.W.},
  title     = {{Parallel Cost Centre Profiling}},
  year      = 1997,
  booktitle = {Glasgow Workshop on Functional Programming},
  address   = {Ullapool, Scotland, September 15--17},
  pages     = {51--72},
  note      = {Draft proceedings},
  keywords  = {GpH, parallel Haskell},
  url       = {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/grancc.ps.gz},
  ALTurl    = {http://www.dcs.gla.ac.uk/fp/workshops/fpw97/HammondLoidlTrinder.ps},
}

%% Glasgow FP Workshop '97 paper on a parallel bowing-calculation algorithm.
%% (The entry had two `keywords` fields -- BibTeX keeps only one -- merged
%% into the single superset {GpH, parallel Haskell, parallel applications}.)
@InProceedings{HLT*97,
  author = 	 {Hall, C.V. and Loidl, H-W. and Trinder, P.W. and Hammond,
                  K. and O'Donnell, J.T.},
  title = 	 {{Refining a Parallel Algorithm for Calculating Bowings}},
  year =         1997,
  booktitle =	 {Glasgow Workshop on Functional Programming},
  address =	 {Ullapool, Scotland, September 15--17},
  keywords =     {GpH, parallel Haskell, parallel applications},
  note =         {Draft proceedings},
  url =          {http://www.dcs.gla.ac.uk/fp/workshops/fpw97/Hall-et-al.ps},
}

%% Glasgow FP Workshop '96 paper on a sized time system (cost/size analysis).
@InProceedings{LoHa96,
  author =       {Loidl, H-W. and Hammond, K.},
  title =        {{A Sized Time System for a Parallel Functional Language}},
  year =         {1996},
  booktitle =	 {Glasgow Workshop on Functional Programming},
  address =	 {Ullapool, Scotland, July 8--10},
  keywords =     {Resource analysis},
  url =          {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/sized.ps.gz},
  ALTurl =          {http://www.dcs.glasgow.ac.uk/fp/workshops/fpw96/Loidl.ps.gz},
  abstractURL =  {http://www.dcs.glasgow.ac.uk/fp/workshops/fpw96/Proceedings96.html},
  abstract =     "This paper describes an inference system, whose purpose is to determine the
cost of evaluating expressions in a strict purely functional
language. Upper bounds can be derived for both computation cost and the
size of data structures. We outline a static analysis based on this
inference system for inferring size and cost information. The analysis is a
synthesis of the sized types of Hughes et al., and the polymorphic time
system of Dornic et al., which was extended to static dependent costs by
Reistad and Gifford.

Our main interest in cost information is for scheduling tasks in the
parallel execution of functional languages. Using the GranSim parallel
simulator, we show that the information provided by our analysis is
sufficient to characterise relative task granularities for a simple
functional program. This information can be used in the runtime-system of
the Glasgow Parallel Haskell compiler to improve dynamic program
performance."
}

@InProceedings{HLP95,
  author =       {Hammond, K. and Loidl, H-W. and Partridge, A.},
  title =        {{Visualising Granularity in Parallel Programs: A
                   Graphical Winnowing System for Haskell}},
  year =         {1995},
  booktitle =    {HPFC'95 --- High Performance Functional Computing},
  pages =        {208--221},
  address =      {Denver, Colorado, April 10--12},
  keywords =     {GpH, parallel Haskell, GranSim},
  url =          {http://www.macs.hw.ac.uk/~hwloidl/publications/Glasgow/HPFC95.ps.gz},
  ALTurl =       {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/HPFC95.pdf},
  abstract =     {To take advantage of distributed-memory parallel machines it is essential
                  to have good control of task granularity. This paper describes a fairly
                  accurate parallel simulator for Haskell, based on the Glasgow compiler, and
                  complementary tools for visualising task granularities. Together these
                  tools allow us to study the effects of various annotations on task
                  granularity on a variety of simulated parallel architectures. They also
                  provide a more precise tool for the study of parallel execution than has
                  previously been available for Haskell programs.

                  These tools have already confirmed that thread migration is essential in
                  parallel systems, demonstrated a close correlation between thread execution
                  times and total heap allocations, and shown that fetching data
                  synchronously normally gives better overall performance than asynchronous
                  fetching, if data is fetched on demand.}
}

@InProceedings{LoHa95,
  author =       {Loidl, H-W. and Hammond, K.},
  title =        {{On the Granularity of Divide-and-Conquer Parallelism}},
  year =         {1995},
  booktitle =    {Glasgow Workshop on Functional Programming},
  series =       {Workshops in Computing},
  publisher =    S-V,
  address =      {Ullapool, Scotland, July 8--10},
  keywords =     {GpH, parallel Haskell},
  url =          {http://www.macs.hw.ac.uk/~dsg/gph/papers/ps/div-conc.ps.gz},
  ALTurl =       {http://www.dcs.gla.ac.uk/~hwloidl/publications/GlaFp95.ps.gz},
  abstract =     {This paper studies the runtime behaviour of various parallel
                  divide-and-conquer algorithms written in a non-strict
                  functional language, when three common granularity control
                  mechanisms are used: a simple cut-off, a priority thread
                  creation and a priority scheduling mechanism. These mechanisms
                  use granularity information that is currently provided via
                  annotations to improve the performance of the parallel
                  programs.

                  The programs we examine are several variants of a generic
                  divide-and-conquer program, an unbalanced divide-and-conquer
                  algorithm and a parallel determinant computation. Our results
                  indicate that for balanced computation trees a simple,
                  low-overhead mechanism performs well whereas the more complex
                  mechanisms offer further improvements for unbalanced
                  computation trees.}
}

%% InProceedings{HLP94,
%%   author =       {Hammond, K. and Loidl, H-W. and Partridge, A.},
%%   title =        {{Improving Granularity in Parallel Functional Programs: A
%%                    Graphical Winnowing System for Haskell}},
%%   year =         {1994},
%%   series =	 {Workshops in Computing},
%%   booktitle =	 {Glasgow Workshop on Functional Programming},
%%   pages =        {111--126},
%%   publisher =	 S-V,
%%   address =	 {Ayr, Scotland, September 12--14},
%%   note =         {Superseded by the HPFC-95 paper},
%% }

%%% Technical Reports


@InProceedings{THL*94,
  author =       {Trinder, P. and Hammond, K. and Loidl, H-W. and
                  {Mattson Jr.}, J. and Partridge, A. and {Peyton Jones}, S.L.},
  title =        {{GRAPHing the Future}},
  booktitle =    {{IFL'94 --- International Workshop on the Implementation of
                   Functional Languages}},
  year =         {1994},
  address =      {University of East Anglia, Norwich, U.K., September 7--9},
  keywords =     {GpH, parallel Haskell},
  url =          {ftp://ftp.dcs.gla.ac.uk/pub/glasgow-fp/authors/Hans_Loidl/publications/gum-ifl94.ps.gz},
  ALTurl =       {http://www.tcs.informatik.uni-muenchen.de/~hwloidl/publications/GRAPHing.pdf},
  abstract =     {At Glasgow our research into parallel functional programming has been
                  moving away from our novel architecture, GRIP towards the provision of
                  a general parallel runtime environment.  We call this GRAPH (Graph
                  Reduction for an Assortment of Parallel Hardware).

                  This paper describes the design of a new memory and load management
                  model for GRAPH, which is intended to match shared- and
                  distributed-memory machines better and to provide a framework for our
                  research into parallel functional databases and granularity control.
                  This model is currently under implementation.  No performance
                  results can therefore be provided at the present time.}
}

@InProceedings{LoHa94,
  author =       {Loidl, H-W. and Hammond, K.},
  title =        {{GRAPH for PVM: Graph Reduction for Distributed Hardware}},
  booktitle =    {{IFL'94 --- International Workshop on the Implementation of
                   Functional Languages}},
  year =         {1994},
  address =      {University of East Anglia, Norwich, U.K., September 7--9},
  keywords =     {GpH, parallel Haskell},
  url =          {http://www.dcs.gla.ac.uk/~hwloidl/publications/IFL94.ps.gz},
  abstract =     {We describe a  version of the GRAPH  system (Graph Reduction for
                  an   Assortment  of Parallel  Hardware)    designed to  execute  parallel
                  functional   programs on  a range  of   distributed-memory MIMD  machines
                  running  the  PVM communications harness.    GRAPH was developed from the
                  runtime system for the novel  GRIP multiprocessor.  Although this  system
                  has  proved highly successful, being  a novel architecture  it is hard to
                  compare results with  other  architectures or implementations.  The  CPUs
                  used in the GRIP design are also rather old.

                  The  principal extensions  from  GRAPH  for  GRIP  to GRAPH  for PVM  are
                  intended  to handle   high  latencies  more efficiently,  by   exploiting
                  asynchronous  communication, multi-threaded  scheduling, and by  grouping
                  packets into larger entities where possible.   The main innovation is the
                  development of  new, sophisticated {\em  packet flushing} and {\em packet
                  fetching} algorithms whose purpose is to allow graphs to be exported to
                  and fetched from  global memory  without inter-processor synchronisation.
                  As communication and    reduction  can be interleaved,    a  new form   of
                  synchronising communication and reduction is necessary.

                  We also give a short outlook on the design  of a new portable distributed
                  graph reduction     system, GRAPH for  UMM,  that   adepts the  ideas and
                  techniques originally  developed  for the  GRIP system  to  a distributed
                  memory environment.

                  GRAPH for PVM has been implemented  on a network  of SUN workstations and
                  it  is  currently being   tested and   debugged.   Although no  extensive
                  performance  measurements have  been  performed  yet, the available  data
                  shows a significant  decrease in the overall   communication and thus  an
                  improvement in the overall performance of the system.}
}

%%% %%% Masters period

% NOTE(review): month now uses the standard BibTeX macro (may), unquoted, so
% styles can abbreviate/translate it; {May} as a literal string defeats that.
@MastersThesis{Loid92,
  author =       {Loidl, H-W.},
  title =        {{A Parallelizing Compiler for the Functional Programming
                   Language EVE}},
  school =       {RISC-Linz},
  address =      {Johannes Kepler University, Linz, Austria},
  month =        may,
  year =         {1992},
  descr =        {plfun-eve},
}
%note = {Also: Technical Report 92-30, RISC-Linz, Johannes Kepler University,
%              Linz, Austria, May 1992. Also: Technical Report 93-16, ACPC
%              Technical Report Series, Austrian Center for Parallel
%	      Computation, July 1993.},

% NOTE(review): month now uses the standard BibTeX macro (dec) instead of the
% literal {December}, matching best practice for month fields.
@TechReport{Loid93b,
  author =       {Loidl, Hans Wolfgang},
  title =        {{Solving a System of Linear Equations by Using a Modular
                   Method}},
  number =       {93-69},
  institution =  {RISC-Linz},
  address =      {Johannes Kepler University, Linz, Austria},
  month =        dec,
  year =         {1993},
  descr =        {paca},
}

@InProceedings{LiLo93,
  author =       {Limongelli, C. and Loidl, H-W.},
  title =        {{Rational Number Arithmetic by Parallel P-adic Algorithms}},
  booktitle =    {ACPC'93 --- Parallel Computation --- Second International ACPC
                  Conference},
  year =         {1993},
  OPTeditor =    {Volkert, J.},
  volume =       {734},
  series =       LNCS,
  pages =        {72--86},
  publisher =    S-V,
  address =      {Gmunden, Austria, October 4--6},
  OPTorganization = {Austrian Center for Parallel Computation (ACPC)},
  url =          {http://www.macs.hw.ac.uk/~hwloidl/publications/Glasgow/p-adic.ps.gz},
  ALTurl =       {http://www.dcs.gla.ac.uk/~hwloidl/publications/p-adic.ps.gz},
  descr =        {paca},
  OPTalso =      {Also: Technical Report 93-41, RISC-Linz, Johannes Kepler
                  University, Linz, Austria, July 1993.},
}

% NOTE(review): series now uses the file's @String LNCS macro (as LiLo93 does)
% instead of an inline spelling, so the venue name can be switched globally.
@InProceedings{HoLo94,
  author =       {Hong, H. and Loidl, H-W.},
  title =        {{Parallel Computation of Modular Multivariate Polynomial
                   Resultants on a Shared Memory Machine}},
  booktitle =    {CONPAR'94 --- Conference on Parallel and Vector Processing},
  year =         {1994},
  OPTeditor =    {Buchberger, B. and Volkert, J.},
  volume =       {854},
  series =       LNCS,
  pages =        {325--336},
  address =      {Linz, Austria, September 6--8},
  url =          {http://www.dcs.gla.ac.uk/~hwloidl/publications/resultant.ps.gz},
  OPTalso =      {Also: Technical Report 94-19, RISC-Linz, Johannes Kepler
                  University, Linz, Austria, 1994.},
}

% NOTE(review): month now uses the standard BibTeX macro (may) instead of the
% literal {May}.
@TechReport{HSN*92,
  author =       {Hong, H. and Schreiner, W. and Neubacher, A. and Siegl, K. and
                  Loidl, H-W. and Jebelean, T. and Zettler, P.},
  title =        {{PACLIB User Manual}},
  institution =  {RISC-Linz},
  address =      {Johannes Kepler University, Linz, Austria},
  month =        may,
  year =         {1992},
  number =       {92-32},
  OPTnote =      {Also: Technical Report ACPC/TR 92-9, ACPC Technical Report
                  Series, Austrian Center for Parallel Computation, July 1992},
}

% NOTE(review): date range in address fixed to an en-dash (8--10); a single
% hyphen is a typographical error and inconsistent with every other entry here.
@InProceedings{loidl92:_paral_compil_funct_progr_languag_eve,
  author =       {Loidl, H-W.},
  title =        {{A Parallelizing Compiler for the Functional Programming Language EVE}},
  booktitle =    {{Austrian-Hungarian Workshop on Transputer Applications}},
  pages =        {1--10},
  year =         {1992},
  series =       {Technical Report Series of the Hungarian Academy of Sciences},
  address =      {Sopron, Hungary, October 8--10},
  url =          {http://www.macs.hw.ac.uk/~hwloidl/publications/Glasgow/hungary.ps.gz},
}

@InProceedings{PARS92,
  author =       {Loidl, H-W.},
  title =        {{Compiling Functional Programs to Dataflow Code}},
  booktitle =    {{Workshop on Parallel Computers and Programming Languages}},
  year =         {1992},
  number =       {10},
  series =       {Communications of the German Computer Society PARS},
  pages =        {78--86},
  address =      {Dagstuhl Castle, Germany, February 26--28},
  url =          {http://www.macs.hw.ac.uk/~hwloidl/publications/Glasgow/pars.ps.gz},
}

% NOTE(review): month now uses the standard BibTeX macro (jan) instead of the
% literal {January}.
@TechReport{CircPrgs92,
  author =       {Loidl, H-W.},
  title =        {{Circular Programs on Compound Data Structures}},
  type =         {Technical Report},
  number =       {92-06},
  institution =  {RISC-Linz},
  address =      {Johannes Kepler University, Linz, Austria},
  month =        jan,
  year =         {1992},
}

% Local Variables:
% TeX-parse-self: t
% TeX-auto-save: t
% mode: bibtex
% minor-mode: outline
% outline-minor-mode: nil
% outline-regexp: "[%\f]+"
% End:
