Commit 69022ef

Commit message: -bibliography
1 parent: efad9e0

3 files changed: 28 additions, 179 deletions

tex/Bibliography.bib (7 additions, 1 deletion)
@@ -155,5 +155,11 @@ @Article{
   file = {:http\://arxiv.org/pdf/2005.01643v3:PDF;:http\://arxiv.org/pdf/2005.01643v3:},
   keywords = {cs.LG, cs.AI, stat.ML},
 }
-
+@article{article,
+  author = {Lagoudakis, Michail and Parr, Ronald and Bartlett, L.},
+  year   = {2004},
+  month  = {01},
+  pages  = {},
+  title  = {Journal of Machine Learning Research 4 (2003) 1107-1149 Submitted 8/02; Published 12/03 Least-Squares Policy Iteration}
+}
 @Comment{jabref-meta: databaseType:bibtex;}
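
The entry added above came in with the auto-generated key "article", the JMLR running header fused into its title field, and what looks like the action editor ("Bartlett, L.") imported as a third author. Assuming it is meant to cite Lagoudakis and Parr's JMLR paper named in that title string, a cleaned-up version might look like this (the key Lagoudakis2003 is only a suggestion):

@Article{Lagoudakis2003,
  author       = {Lagoudakis, Michail G. and Parr, Ronald},
  title        = {Least-Squares Policy Iteration},
  journaltitle = {Journal of Machine Learning Research},
  volume       = {4},
  pages        = {1107--1149},
  year         = {2003},
}

The volume, pages, and year here are read off the header text embedded in the imported title field, not from any external source.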

tex/Bibliography.bib.bak (21 additions, 20 deletions)
@@ -1,17 +1,5 @@
 % Encoding: UTF-8
 
-@Article{Levine2020,
-  author = {Sergey Levine and Aviral Kumar and George Tucker and Justin Fu},
-  title = {Offline Reinforcement Learning: Tutorial, Review, and Perspectives on Open Problems},
-  abstract = {In this tutorial article, we aim to provide the reader with the conceptual tools needed to get started on research on offline reinforcement learning algorithms: reinforcement learning algorithms that utilize previously collected data, without additional online data collection. Offline reinforcement learning algorithms hold tremendous promise for making it possible to turn large datasets into powerful decision making engines. Effective offline reinforcement learning methods would be able to extract policies with the maximum possible utility out of the available data, thereby allowing automation of a wide range of decision-making domains, from healthcare and education to robotics. However, the limitations of current algorithms make this difficult. We will aim to provide the reader with an understanding of these challenges, particularly in the context of modern deep reinforcement learning methods, and describe some potential solutions that have been explored in recent work to mitigate these challenges, along with recent applications, and a discussion of perspectives on open problems in the field.},
-  date = {2020-05-04},
-  eprint = {2005.01643v3},
-  eprintclass = {cs.LG},
-  eprinttype = {arXiv},
-  file = {:http\://arxiv.org/pdf/2005.01643v3:PDF},
-  keywords = {cs.LG, cs.AI, stat.ML},
-}
-
 @WWW{,
   url = {https://gym.openai.com},
 }
@@ -143,16 +131,29 @@
   keywords = {cs.LG, cs.AI, cs.RO, stat.ML},
 }
 
-@Article{Wang2019a,
-  author = {Tingwu Wang and Xuchan Bao and Ignasi Clavera and Jerrick Hoang and Yeming Wen and Eric Langlois and Shunshi Zhang and Guodong Zhang and Pieter Abbeel and Jimmy Ba},
-  date = {2019-07-03},
-  title = {Benchmarking Model-Based Reinforcement Learning},
-  eprint = {1907.02057v1},
+@Article{Pearce2018,
+  author = {Tim Pearce and Felix Leibfried and Alexandra Brintrup and Mohamed Zaki and Andy Neely},
+  title = {Uncertainty in Neural Networks: Approximately Bayesian Ensembling},
+  abstract = {Understanding the uncertainty of a neural network's (NN) predictions is essential for many purposes. The Bayesian framework provides a principled approach to this, however applying it to NNs is challenging due to large numbers of parameters and data. Ensembling NNs provides an easily implementable, scalable method for uncertainty quantification, however, it has been criticised for not being Bayesian. This work proposes one modification to the usual process that we argue does result in approximate Bayesian inference; regularising parameters about values drawn from a distribution which can be set equal to the prior. A theoretical analysis of the procedure in a simplified setting suggests the recovered posterior is centred correctly but tends to have an underestimated marginal variance, and overestimated correlation. However, two conditions can lead to exact recovery. We argue that these conditions are partially present in NNs. Empirical evaluations demonstrate it has an advantage over standard ensembling, and is competitive with variational methods.},
+  date = {2018-10-12},
+  eprint = {1810.05546},
+  eprintclass = {stat.ML},
+  eprinttype = {arXiv},
+  file = {:http\://arxiv.org/pdf/1810.05546v5:PDF},
+  journaltitle = {The 23rd International Conference on Artificial Intelligence and Statistics, AISTATS 2020},
+  keywords = {stat.ML, cs.LG},
+}
+
+@Article{,
+  author = {Sergey Levine and Aviral Kumar and George Tucker and Justin Fu},
+  title = {Offline Reinforcement Learning: Tutorial, Review, and Perspectives on Open Problems},
+  abstract = {In this tutorial article, we aim to provide the reader with the conceptual tools needed to get started on research on offline reinforcement learning algorithms: reinforcement learning algorithms that utilize previously collected data, without additional online data collection. Offline reinforcement learning algorithms hold tremendous promise for making it possible to turn large datasets into powerful decision making engines. Effective offline reinforcement learning methods would be able to extract policies with the maximum possible utility out of the available data, thereby allowing automation of a wide range of decision-making domains, from healthcare and education to robotics. However, the limitations of current algorithms make this difficult. We will aim to provide the reader with an understanding of these challenges, particularly in the context of modern deep reinforcement learning methods, and describe some potential solutions that have been explored in recent work to mitigate these challenges, along with recent applications, and a discussion of perspectives on open problems in the field.},
+  date = {2020-05-04},
+  eprint = {2005.01643},
   eprintclass = {cs.LG},
   eprinttype = {arXiv},
-  abstract = {Model-based reinforcement learning (MBRL) is widely seen as having the potential to be significantly more sample efficient than model-free RL. However, research in model-based RL has not been very standardized. It is fairly common for authors to experiment with self-designed environments, and there are several separate lines of research, which are sometimes closed-sourced or not reproducible. Accordingly, it is an open question how these various existing MBRL algorithms perform relative to each other. To facilitate research in MBRL, in this paper we gather a wide collection of MBRL algorithms and propose over 18 benchmarking environments specially designed for MBRL. We benchmark these algorithms with unified problem settings, including noisy environments. Beyond cataloguing performance, we explore and unify the underlying algorithmic differences across MBRL algorithms. We characterize three key research challenges for future MBRL research: the dynamics bottleneck, the planning horizon dilemma, and the early-termination dilemma. Finally, to maximally facilitate future research on MBRL, we open-source our benchmark in http://www.cs.toronto.edu/~tingwuwang/mbrl.html.},
-  file = {:http\://arxiv.org/pdf/1907.02057v1:PDF},
-  keywords = {cs.LG, cs.AI, cs.RO, stat.ML},
+  file = {:http\://arxiv.org/pdf/2005.01643v3:PDF;:http\://arxiv.org/pdf/2005.01643v3:},
+  keywords = {cs.LG, cs.AI, stat.ML},
 }
 
 @Comment{jabref-meta: databaseType:bibtex;}
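
One thing worth flagging in this hunk: the re-added offline-RL entry opens with "@Article{," — an empty citation key — so it cannot be referenced with \cite. The copy of the same entry deleted in the first hunk of this file carried the key Levine2020; if the entry is still meant to be citable, restoring that key would be a one-line fix:

@Article{Levine2020,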
