<?xml version="1.0" encoding="UTF-8"?>

<!DOCTYPE owl [
	<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">
	<!ENTITY rdfs "http://www.w3.org/2000/01/rdf-schema#">
	<!ENTITY xsd "http://www.w3.org/2001/XMLSchema#">
	<!ENTITY owl "http://www.w3.org/2002/07/owl#">
	<!ENTITY cc "http://web.resource.org/cc/">
	<!ENTITY event "http://ebiquity.umbc.edu/ontology/event.owl#">
	<!ENTITY person "http://ebiquity.umbc.edu/ontology/person.owl#">
	<!ENTITY assert "http://ebiquity.umbc.edu/ontology/assertion.owl#">
]>

<!--

This ontology document is licensed under the Creative Commons
Attribution License. To view a copy of this license, visit
http://creativecommons.org/licenses/by/2.0/ or send a letter to
Creative Commons, 559 Nathan Abbott Way, Stanford, California
94305, USA.

-->

<rdf:RDF 
		xmlns:rdf = "&rdf;"
		xmlns:rdfs = "&rdfs;"
		xmlns:xsd = "&xsd;"
		xmlns:owl = "&owl;"
		xmlns:cc = "&cc;"
		xmlns:event = "&event;"
		xmlns:person = "&person;"
		xmlns:assert = "&assert;">
	<!-- Talk announcement: "Learning to Win" by David Aha, hosted by Marie desJardins. -->
	<event:Event rdf:about="http://ebiquity.umbc.edu/event/html/id/126/Learning-to-Win">
		<!-- The title is stated twice: once as a generic rdfs:label, once as the
		     event-ontology title property, with identical literal values. -->
		<rdfs:label><![CDATA[Learning to Win]]></rdfs:label>
		<event:title><![CDATA[Learning to Win]]></event:title>
		<!-- Speaker modeled as an embedded person:Collaborator resource
		     (same pattern as event:host below). -->
		<event:speaker>
<person:Collaborator rdf:about="http://ebiquity.umbc.edu/person/html/David/Aha"><person:name><![CDATA[David Aha]]></person:name><rdfs:label><![CDATA[David Aha]]></rdfs:label></person:Collaborator>
		</event:speaker>
		<!-- xsd:dateTime literal with an explicit -05:00 UTC offset. -->
		<event:startDate rdf:datatype="&xsd;dateTime">2005-11-18T13:00:00-05:00</event:startDate>
		<event:location><![CDATA[325b ITE]]></event:location>
		<!-- Abstract carried verbatim inside CDATA; the embedded <p> tags are
		     literal text to this parser, not parsed markup (presumably rendered
		     as HTML by a downstream consumer — not verifiable from this file). -->
		<event:abstract><![CDATA[Repositories of standard-format datasets and analysis tasks
defined on them have critically contributed to machine
learning investigations, but their access has led to some
ideological stagnation. While they allow researchers to
conduct comparative empirical analyses of supervised learning
algorithms on a massive scale, this led to many dull
contributions on small improvements to knowledge-poor
algorithms, which became easier to publish than more ambitious
investigations. For example, evaluating knowledge-intensive
learning approaches on challenging tasks from virtual
simulation environments is far more difficult.
<p>
We have developed TIELT (Testbed for Integrating and
Evaluating Learning Techniques) to address this problem. By
integrating their (e.g., machine learning) system with TIELT,
researchers will gain access to previously integrated
simulators and comparison systems, which they can test versus
their own on tasks they select using the evaluation
methodology they encode. TIELT, a freely available and
supported tool (http://nrlsat.ittid.com), is maturing through
the efforts of industry and academic partners. I will describe
our initial stress test of TIELT (Aha et al., 2005), which
involved a case-based approach for learning to select player
actions to win a real-time strategy game. I'll also describe
our plans for using it to support DARPA challenge problems,
explain its role in future gaming competitions, and encourage
its use (e.g., in class projects). This work is being
performed in conjunction with Matthew Molineaux (ITT
Industries) and Marc Ponsen (University of Maastricht, The
Netherlands).
<p>
Reference: Aha, D.W., Molineaux, M., & Ponsen,
M. (2005). Learning to win: Case-based plan selection in a
real-time strategy game. Proceedings of the Sixth
International Conference on Case-Based Reasoning
(pp. 5-20). Chicago, IL: Springer. (Winner, Best Paper Award)
]]></event:abstract>
		<!-- Host, using the same embedded person:Collaborator pattern as the speaker. -->
		<event:host>
<person:Collaborator rdf:about="http://ebiquity.umbc.edu/person/html/Marie/desJardins"><person:name><![CDATA[Marie desJardins]]></person:name><rdfs:label><![CDATA[Marie desJardins]]></rdfs:label></person:Collaborator>
		</event:host>
	</event:Event>

<!-- License statement for this document itself (rdf:about="" denotes the
     current document). The ccREL vocabulary's *property* linking a work to
     its license is lowercase cc:license; uppercase cc:License is the license
     *class* and must not be used as a predicate, so it is corrected here. -->
<rdf:Description rdf:about="">
	<cc:license rdf:resource="http://creativecommons.org/licenses/by/2.0/" />
</rdf:Description>

</rdf:RDF>
