repo (string, 1-191 chars, nullable) | file (string, 23-351 chars) | code (string, 0-5.32M chars) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
null | orc-main/java/tools/src/test/org/apache/orc/tools/convert/TestCsvReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.convert;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.StringReader;
import java.util.Locale;
import static org.apache.orc.tools.convert.ConvertTool.DEFAULT_TIMESTAMP_FORMAT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestCsvReader {
Locale defaultLocale;
@BeforeEach
public void storeDefaultLocale() {
defaultLocale = Locale.getDefault();
Locale.setDefault(Locale.US);
}
@AfterEach
public void restoreDefaultLocale() {
Locale.setDefault(defaultLocale);
}
@Test
public void testSimple() throws Exception {
// yyyy[[-][/]]MM[[-][/]]dd[['T'][ ]]HH:mm:ss[ ][XXX][X]
StringReader input = new StringReader(
"1,1.25,1.01,'a',f,'2000-01-01T00:00:00+00:00'\n" +
"2,2.5,2.02,'14',t,'2000/01/01T00:00:00+00'\n" +
"3,3.75,3.03,'1e',false,'2000-01-01T00:00:00Z'\n" +
"4,5,4.04,'28',true,'2000-01-01 00:00:00+00'\n" +
"5,6.25,5.05,'32',0,'2000-01-01 00:00:00-00'\n" +
"6,7.5,6.06,'3c',1,'2000-01-01T04:00:00+04'\n" +
"7,8.75,7.07,'46',2,'1999-12-31T20:00:00-04:00'\n" +
"8,10,8.08,'50',t,'2000-01-01T00:00:00+00'\n"
);
TypeDescription schema = TypeDescription.fromString(
"struct<a:int,b:double,c:decimal(10,2),d:string,e:boolean,e:timestamp>");
RecordReader reader = new CsvReader(input, null, 1, schema, ',', '\'',
'\\', 0, "", DEFAULT_TIMESTAMP_FORMAT);
VectorizedRowBatch batch = schema.createRowBatch(5);
assertTrue(reader.nextBatch(batch));
assertEquals(5, batch.size);
long bool = 0;
for(int r = 0; r < batch.size; ++r) {
assertEquals(r+1, ((LongColumnVector) batch.cols[0]).vector[r]);
assertEquals(1.25 * (r + 1), ((DoubleColumnVector) batch.cols[1]).vector[r], 0.001);
assertEquals((r + 1) + ".0" + (r + 1), ((DecimalColumnVector) batch.cols[2]).vector[r].toFormatString(2));
assertEquals(Integer.toHexString((r + 1) * 10), ((BytesColumnVector) batch.cols[3]).toString(r));
assertEquals(bool, ((LongColumnVector) batch.cols[4]).vector[r]);
bool = 1 - bool;
assertEquals(946684800000L, ((TimestampColumnVector) batch.cols[5]).getTime(r));
}
assertTrue(reader.nextBatch(batch));
assertEquals(3, batch.size);
for(int r = 0; r < batch.size; ++r) {
assertEquals(r + 6, ((LongColumnVector) batch.cols[0]).vector[r]);
assertEquals(1.25 * (r + 6), ((DoubleColumnVector) batch.cols[1]).vector[r], 0.001);
assertEquals((r + 6) + ".0" + (r + 6), ((DecimalColumnVector) batch.cols[2]).vector[r].toFormatString(2));
assertEquals(Integer.toHexString((r + 6) * 10), ((BytesColumnVector) batch.cols[3]).toString(r));
assertEquals(bool, ((LongColumnVector) batch.cols[4]).vector[r]);
bool = 1 - bool;
assertEquals(946684800000L, ((TimestampColumnVector) batch.cols[5]).getTime(r));
}
assertFalse(reader.nextBatch(batch));
}
@Test
public void testNulls() throws Exception {
StringReader input = new StringReader(
"1,1,1,'a'\n" +
"'null','null','null','null'\n" +
"3,3,3,'row 3'\n"
);
TypeDescription schema = TypeDescription.fromString(
"struct<a:int,b:double,c:decimal(10,2),d:string>");
RecordReader reader = new CsvReader(input, null, 1, schema, ',', '\'',
'\\', 0, "null", DEFAULT_TIMESTAMP_FORMAT);
VectorizedRowBatch batch = schema.createRowBatch();
assertTrue(reader.nextBatch(batch));
assertEquals(3, batch.size);
for(int c=0; c < 4; ++c) {
assertFalse(batch.cols[c].noNulls, "column " + c);
}
// check row 0
assertEquals(1, ((LongColumnVector) batch.cols[0]).vector[0]);
assertEquals(1, ((DoubleColumnVector) batch.cols[1]).vector[0], 0.001);
assertEquals("1", ((DecimalColumnVector) batch.cols[2]).vector[0].toString());
assertEquals("a", ((BytesColumnVector) batch.cols[3]).toString(0));
for(int c=0; c < 4; ++c) {
assertFalse(batch.cols[c].isNull[0], "column " + c);
}
// row 1
for(int c=0; c < 4; ++c) {
assertTrue(batch.cols[c].isNull[1], "column " + c);
}
// check row 2
assertEquals(3, ((LongColumnVector) batch.cols[0]).vector[2]);
assertEquals(3, ((DoubleColumnVector) batch.cols[1]).vector[2], 0.001);
assertEquals("3", ((DecimalColumnVector) batch.cols[2]).vector[2].toString());
assertEquals("row 3", ((BytesColumnVector) batch.cols[3]).toString(2));
for(int c=0; c < 4; ++c) {
assertFalse(batch.cols[c].isNull[2], "column " + c);
}
}
@Test
public void testStructs() throws Exception {
StringReader input = new StringReader(
"1,2,3,4\n" +
"5,6,7,8\n"
);
TypeDescription schema = TypeDescription.fromString(
"struct<a:int,b:struct<c:int,d:int>,e:int>");
RecordReader reader = new CsvReader(input, null, 1, schema, ',', '\'',
'\\', 0, "null", DEFAULT_TIMESTAMP_FORMAT);
VectorizedRowBatch batch = schema.createRowBatch();
assertTrue(reader.nextBatch(batch));
assertEquals(2, batch.size);
int nextVal = 1;
for(int r=0; r < 2; ++r) {
assertEquals(nextVal++, ((LongColumnVector) batch.cols[0]).vector[r], "row " + r);
StructColumnVector b = (StructColumnVector) batch.cols[1];
assertEquals(nextVal++, ((LongColumnVector) b.fields[0]).vector[r], "row " + r);
assertEquals(nextVal++, ((LongColumnVector) b.fields[1]).vector[r], "row " + r);
assertEquals(nextVal++, ((LongColumnVector) batch.cols[2]).vector[r], "row " + r);
}
assertFalse(reader.nextBatch(batch));
}
@Test
public void testLargeNumbers() throws Exception {
StringReader input = new StringReader(
"2147483646,-2147483647,9223372036854775806,-9223372036854775807\n"
);
TypeDescription schema = TypeDescription.fromString(
"struct<a:int,b:int,d:bigint,e:bigint>");
RecordReader reader = new CsvReader(input, null, 1, schema, ',', '\'',
'\\', 0, "null", DEFAULT_TIMESTAMP_FORMAT);
VectorizedRowBatch batch = schema.createRowBatch();
assertTrue(reader.nextBatch(batch));
assertEquals(1, batch.size);
assertEquals(2147483646, ((LongColumnVector) batch.cols[0]).vector[0]);
assertEquals(-2147483647, ((LongColumnVector) batch.cols[1]).vector[0]);
assertEquals(9223372036854775806L, ((LongColumnVector) batch.cols[2]).vector[0]);
assertEquals(-9223372036854775807L, ((LongColumnVector) batch.cols[3]).vector[0]);
assertFalse(reader.nextBatch(batch));
}
@Test
public void testCustomTimestampFormat() throws Exception {
String tsFormat = "d[d] MMM yyyy HH:mm:ss.SSSSSS";
StringReader input = new StringReader(
"'21 Mar 2018 12:23:34.123456'\n" +
"'3 Feb 2018 18:04:51.456789'\n"
);
TypeDescription schema = TypeDescription.fromString(
"struct<a:timestamp>");
RecordReader reader = new CsvReader(input, null, 1, schema, ',', '\'',
'\\', 0, "", tsFormat);
VectorizedRowBatch batch = schema.createRowBatch(2);
assertTrue(reader.nextBatch(batch));
assertEquals(2, batch.size);
TimestampColumnVector cv = (TimestampColumnVector) batch.cols[0];
assertEquals("2018-03-21 12:23:34.123456", cv.asScratchTimestamp(0).toString());
assertEquals("2018-02-03 18:04:51.456789", cv.asScratchTimestamp(1).toString());
}
}
| 8,913 | 42.271845 | 112 | java |
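A minimal standalone sketch of driving CsvReader, distilled from the calls exercised in the test above. The (null, 1) constructor arguments are copied verbatim from the tests and their exact semantics are not documented in this file; the class name CsvReaderSketch is hypothetical.
package org.apache.orc.tools.convert;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import java.io.StringReader;
import static org.apache.orc.tools.convert.ConvertTool.DEFAULT_TIMESTAMP_FORMAT;
public class CsvReaderSketch {
  public static void main(final String[] args) throws Exception {
    // Two rows of (int, double) data, using the same separator/quote/escape settings as the tests.
    final StringReader csv = new StringReader("1,2.5\n3,4.5\n");
    final TypeDescription schema = TypeDescription.fromString("struct<x:int,y:double>");
    final RecordReader reader = new CsvReader(csv, null, 1, schema, ',', '\'',
        '\\', 0, "", DEFAULT_TIMESTAMP_FORMAT);
    final VectorizedRowBatch batch = schema.createRowBatch();
    while (reader.nextBatch(batch)) {
      for (int r = 0; r < batch.size; ++r) {
        final long x = ((LongColumnVector) batch.cols[0]).vector[r];
        final double y = ((DoubleColumnVector) batch.cols[1]).vector[r];
        System.out.println(x + "," + y);
      }
    }
    reader.close();
  }
}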
null | orc-main/java/tools/src/test/org/apache/orc/tools/convert/TestJsonReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.convert;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.io.StringReader;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestJsonReader {
@Test
public void testCustomTimestampFormat() throws Exception {
String tsFormat = "yyyy-MM-dd HH:mm:ss.SSSSSS";
String s = "{\"a\":\"2018-03-21 12:23:34.123456\"}\n" +
"{\"a\":\"2018-02-03 18:04:51.456789\"}\n";
StringReader input = new StringReader(s);
TypeDescription schema = TypeDescription.fromString(
"struct<a:timestamp>");
JsonReader reader = new JsonReader(input, null, 1, schema, tsFormat);
VectorizedRowBatch batch = schema.createRowBatch(2);
assertTrue(reader.nextBatch(batch));
assertEquals(2, batch.size);
TimestampColumnVector cv = (TimestampColumnVector) batch.cols[0];
assertEquals("2018-03-21 12:23:34.123456", cv.asScratchTimestamp(0).toString());
assertEquals("2018-02-03 18:04:51.456789", cv.asScratchTimestamp(1).toString());
}
@Test
public void testTimestampOffByOne() throws Exception {
String tsFormat = "yyyy-MM-dd HH:mm:ss.SSSS";
String s = "{\"a\": \"1970-01-01 00:00:00.0001\"}\n" +
"{\"a\": \"1970-01-01 00:00:00.0000\"}\n" +
"{\"a\": \"1969-12-31 23:59:59.9999\"}\n" +
"{\"a\": \"1969-12-31 23:59:59.0001\"}\n" +
"{\"a\": \"1969-12-31 23:59:59.0000\"}\n" +
"{\"a\": \"1969-12-31 23:59:58.9999\"}";
StringReader input = new StringReader(s);
TypeDescription schema = TypeDescription.fromString(
"struct<a:timestamp>");
JsonReader reader = new JsonReader(input, null, 1, schema, tsFormat);
VectorizedRowBatch batch = schema.createRowBatch(6);
assertTrue(reader.nextBatch(batch));
assertEquals(6, batch.size);
TimestampColumnVector cv = (TimestampColumnVector) batch.cols[0];
assertEquals("1970-01-01 00:00:00.0001", cv.asScratchTimestamp(0).toString());
assertEquals("1970-01-01 00:00:00.0", cv.asScratchTimestamp(1).toString());
assertEquals("1969-12-31 23:59:59.9999", cv.asScratchTimestamp(2).toString());
assertEquals("1969-12-31 23:59:59.0001", cv.asScratchTimestamp(3).toString());
assertEquals("1969-12-31 23:59:59.0", cv.asScratchTimestamp(4).toString());
assertEquals("1969-12-31 23:59:58.9999", cv.asScratchTimestamp(5).toString());
}
@Test
public void testDateTypeSupport() throws IOException {
LocalDate date1 = LocalDate.of(2021, 1, 18);
LocalDate date2 = LocalDate.now();
String inputString = "{\"dt\": \"" + date1 + "\"}\n" +
"{\"dt\": \"" + date2 + "\"}\n" +
"{\"dt\": \"" + date2 + "\"}\n" +
"{\"dt\": null}";
StringReader input = new StringReader(inputString);
TypeDescription schema = TypeDescription.fromString("struct<dt:date>");
JsonReader reader = new JsonReader(input, null, 1, schema, "");
VectorizedRowBatch batch = schema.createRowBatch(4);
assertTrue(reader.nextBatch(batch));
assertEquals(4, batch.size);
DateColumnVector cv = (DateColumnVector) batch.cols[0];
assertEquals(date1, LocalDate.ofEpochDay(cv.vector[0]));
assertEquals(date2, LocalDate.ofEpochDay(cv.vector[1]));
assertEquals(date2, LocalDate.ofEpochDay(cv.vector[2]));
assertFalse(cv.isNull[2]);
assertTrue(cv.isNull[3]);
}
@Test
public void testDateTimeTypeSupport() throws IOException {
String timestampFormat = "yyyy[[-][/]]MM[[-][/]]dd[['T'][ ]]HH:mm:ss[['.'][ ]][[SSSSSSSSS][SSSSSS][SSS]][[X][Z]['['VV']']]";
LocalDateTime datetime1 = LocalDateTime.of(2021, 1, 18, 1, 2, 3, 4);
LocalDateTime datetime2 = LocalDateTime.now();
OffsetDateTime datetime3 = OffsetDateTime.of(datetime1, ZoneOffset.UTC);
OffsetDateTime datetime4 = OffsetDateTime.of(datetime2, ZoneOffset.ofHours(-7));
ZonedDateTime datetime5 = ZonedDateTime.of(datetime1, ZoneId.of("UTC"));
ZonedDateTime datetime6 = ZonedDateTime.of(datetime2, ZoneId.of("America/New_York"));
String datetime4Str = datetime4.toString();
datetime4Str = datetime4Str.substring(0, datetime4Str.length() - 5) + "0700";
String inputString = "{\"dt\": \"" + datetime1 + "\"}\n" +
"{\"dt\": \"" + datetime2 + "\"}\n" +
"{\"dt\": \"" + datetime3 + "\"}\n" +
"{\"dt\": \"" + datetime4Str + "\"}\n" +
"{\"dt\": \"" + datetime5.toLocalDateTime().toString() + "[" + datetime5.getZone() + "]\"}\n" +
"{\"dt\": \"" + datetime6.toLocalDateTime().toString() + "[" + datetime6.getZone() + "]\"}\n";
StringReader input = new StringReader(inputString);
TypeDescription schema = TypeDescription.fromString("struct<dt:timestamp>");
JsonReader reader = new JsonReader(input, null, 1, schema, timestampFormat);
VectorizedRowBatch batch = schema.createRowBatch(6);
assertTrue(reader.nextBatch(batch));
assertEquals(6, batch.size);
TimestampColumnVector cv = (TimestampColumnVector) batch.cols[0];
assertEquals(datetime1, LocalDateTime.from(cv.asScratchTimestamp(0).toLocalDateTime()));
assertEquals(datetime2, LocalDateTime.from(cv.asScratchTimestamp(1).toLocalDateTime()));
assertEquals(datetime3.toInstant(), cv.asScratchTimestamp(2).toInstant());
assertEquals(datetime4.toInstant(), cv.asScratchTimestamp(3).toInstant());
assertEquals(datetime5.toInstant(), cv.asScratchTimestamp(4).toInstant());
assertEquals(datetime6.toInstant(), cv.asScratchTimestamp(5).toInstant());
}
@Test
public void testUnionTypeSupport() throws IOException {
String inputString = "{\"foo\": {\"tag\": 0, \"value\": 1}}\n" +
"{\"foo\": {\"tag\": 1, \"value\": \"testing\"}}\n" +
"{\"foo\": {\"tag\": 0, \"value\": 3}}";
StringReader input = new StringReader(inputString);
TypeDescription schema = TypeDescription.fromString("struct<foo:uniontype<int,string>>");
JsonReader reader = new JsonReader(input, null, 1, schema, "", "tag", "value");
VectorizedRowBatch batch = schema.createRowBatch(3);
assertTrue(reader.nextBatch(batch));
assertEquals(3, batch.size);
UnionColumnVector union = (UnionColumnVector) batch.cols[0];
LongColumnVector longs = (LongColumnVector) union.fields[0];
BytesColumnVector strs = (BytesColumnVector) union.fields[1];
assertTrue(union.noNulls);
assertFalse(union.isNull[0]);
assertEquals(0, union.tags[0]);
assertEquals(1, longs.vector[0]);
assertFalse(union.isNull[1]);
assertEquals(1, union.tags[1]);
assertEquals("testing", strs.toString(1));
assertFalse(union.isNull[2]);
assertEquals(0, union.tags[2]);
assertEquals(3, longs.vector[2]);
}
}
| 8,795 | 48.41573 | 132 | java |
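A brief sketch of the union convention exercised in testUnionTypeSupport above: each union value is wrapped in a JSON object whose tag field selects the branch and whose value field carries the payload, with both field names passed to the JsonReader constructor. Assumes the same package and imports as the test; the schema and input below are illustrative only.
// Tag 0 selects the int branch, tag 1 the string branch of uniontype<int,string>.
final String json =
    "{\"u\": {\"tag\": 0, \"value\": 7}}\n" +
    "{\"u\": {\"tag\": 1, \"value\": \"seven\"}}\n";
final TypeDescription schema = TypeDescription.fromString("struct<u:uniontype<int,string>>");
final JsonReader reader = new JsonReader(new StringReader(json), null, 1, schema, "", "tag", "value");
final VectorizedRowBatch batch = schema.createRowBatch();
reader.nextBatch(batch);
final UnionColumnVector u = (UnionColumnVector) batch.cols[0];
System.out.println(u.tags[0] + " / " + u.tags[1]);  // prints: 0 / 1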
null | orc-main/java/tools/src/test/org/apache/orc/tools/json/TestJsonSchemaFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import com.google.gson.JsonArray;
import com.google.gson.JsonNull;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import com.google.gson.internal.LazilyParsedNumber;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestJsonSchemaFinder {
@Test
public void testBinaryPatterns() throws Exception {
assertEquals("binary",
JsonSchemaFinder.pickType(new JsonPrimitive("00000000")).toString());
assertEquals("string",
JsonSchemaFinder.pickType(new JsonPrimitive("0000000")).toString());
assertEquals("string",
JsonSchemaFinder.pickType(new JsonPrimitive("")).toString());
assertEquals("binary",
JsonSchemaFinder.pickType(new JsonPrimitive("0123456789abcdefABCDEF")).toString());
assertEquals("string",
JsonSchemaFinder.pickType(new JsonPrimitive("00x0")).toString());
}
@Test
public void testTimestampPatterns() throws Exception {
assertEquals("timestamp",
JsonSchemaFinder.pickType(new JsonPrimitive("2016-01-05T12:34:56Z")).toString());
assertEquals("timestamp",
JsonSchemaFinder.pickType(new JsonPrimitive("2016/01/05 12:34:56")).toString());
assertEquals("string",
JsonSchemaFinder.pickType(new JsonPrimitive("2016/01/05")).toString());
assertEquals("timestamp",
JsonSchemaFinder.pickType(new JsonPrimitive("2016-01-01 16:00:00 +08")).toString());
assertEquals("timestamp",
JsonSchemaFinder.pickType(new JsonPrimitive("2016-01-01 16:00:00+08")).toString());
assertEquals("string",
JsonSchemaFinder.pickType(new JsonPrimitive("2016-01-01 16:00:0008")).toString());
assertEquals("timestamp",
JsonSchemaFinder.pickType(new JsonPrimitive("2016-01-01 06:00:00 -08:30")).toString());
assertEquals("timestamp",
JsonSchemaFinder.pickType(new JsonPrimitive("2017-05-31T12:44:40-04:00")).toString());
}
@Test
public void testBooleans() throws Exception {
assertEquals("boolean",
JsonSchemaFinder.pickType(new JsonPrimitive(true)).toString());
assertEquals("void",
JsonSchemaFinder.pickType(JsonNull.INSTANCE).toString());
assertEquals("boolean",
JsonSchemaFinder.pickType(new JsonPrimitive(false)).toString());
}
@Test
public void testNumbers() throws Exception {
assertEquals("tinyint",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("120"))).toString());
assertEquals("tinyint",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("-128"))).toString());
assertEquals("smallint",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("-24120"))).toString());
assertEquals("smallint",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("128"))).toString());
assertEquals("int",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("60000"))).toString());
assertEquals("bigint",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("-4294967296"))).toString());
assertEquals("bigint",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("-9223372036854775808"))).toString());
assertEquals("bigint",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("9223372036854775807"))).toString());
assertEquals("decimal(19,0)",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("9223372036854775808"))).toString());
assertEquals("decimal(19,0)",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("-9223372036854775809"))).toString());
assertEquals("decimal(10,6)",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("1234.567890"))).toString());
assertEquals("decimal(20,10)",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("-1234567890.1234567890"))).toString());
assertEquals("float",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("1.2e9"))).toString());
assertEquals("double",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("1234567890123456789012345678901234567890"))).toString());
assertEquals("double",
JsonSchemaFinder.pickType(new JsonPrimitive
(new LazilyParsedNumber("1.2E40"))).toString());
// Make the schema
assertEquals("decimal(3,2)",
JsonSchemaFinder.pickType(new JsonPrimitive(
new LazilyParsedNumber("1.23"))).getSchema().toString());
}
@Test
public void testLists() throws Exception {
assertEquals("list<void>",
JsonSchemaFinder.pickType(new JsonArray()).toString());
JsonArray list = new JsonArray();
list.add(new JsonPrimitive(50000));
assertEquals("list<int>", JsonSchemaFinder.pickType(list).toString());
list = new JsonArray();
list.add(new JsonPrimitive(127));
list.add(new JsonPrimitive(50000));
list.add(new JsonPrimitive(50000000000L));
list.add(new JsonPrimitive(-100));
assertEquals("list<bigint>", JsonSchemaFinder.pickType(list).toString());
}
@Test
public void testStructs() throws Exception {
assertEquals("struct<>",
JsonSchemaFinder.pickType(new JsonObject()).toString());
JsonObject struct = new JsonObject();
struct.addProperty("bool", true);
assertEquals("struct<bool:boolean>",
JsonSchemaFinder.pickType(struct).toString());
struct = new JsonObject();
struct.addProperty("str", "value");
struct.addProperty("i", new LazilyParsedNumber("124567"));
assertEquals("struct<i:int,str:string>",
JsonSchemaFinder.pickType(struct).toString());
}
@Test
public void testNullMerges() throws Exception {
assertEquals("void", JsonSchemaFinder.mergeType(
new NullType(),
new NullType()).toString());
assertEquals("boolean", JsonSchemaFinder.mergeType(
new BooleanType(),
new NullType()).toString());
assertEquals("int", JsonSchemaFinder.mergeType(
new NullType(),
new NumericType(HiveType.Kind.INT, 4, 0)
).toString());
assertEquals("string", JsonSchemaFinder.mergeType(
new NullType(),
new StringType(HiveType.Kind.STRING)
).toString());
assertEquals("struct<i:int>", JsonSchemaFinder.mergeType(
new StructType().addField("i", new NumericType(HiveType.Kind.INT, 5, 0)),
new NullType()
).toString());
assertEquals("list<int>", JsonSchemaFinder.mergeType(
new ListType(new NumericType(HiveType.Kind.INT, 5, 0)),
new NullType()
).toString());
assertEquals("uniontype<int>", JsonSchemaFinder.mergeType(
new UnionType().addType(new NumericType(HiveType.Kind.INT, 5, 0)),
new NullType()
).toString());
}
@Test
public void testBooleanMerges() throws Exception {
assertEquals("boolean", JsonSchemaFinder.mergeType(
new BooleanType(),
new BooleanType()).toString());
assertEquals("uniontype<boolean,int>", JsonSchemaFinder.mergeType(
new BooleanType(),
new NumericType(HiveType.Kind.INT, 4, 0)
).toString());
assertEquals("uniontype<boolean,string>", JsonSchemaFinder.mergeType(
new BooleanType(),
new StringType(HiveType.Kind.STRING)
).toString());
assertEquals("uniontype<struct<i:int>,boolean>", JsonSchemaFinder.mergeType(
new StructType().addField("i", new NumericType(HiveType.Kind.INT, 5, 0)),
new BooleanType()
).toString());
assertEquals("uniontype<list<int>,boolean>", JsonSchemaFinder.mergeType(
new ListType(new NumericType(HiveType.Kind.INT, 5, 0)),
new BooleanType()
).toString());
assertEquals("uniontype<int,boolean>", JsonSchemaFinder.mergeType(
new UnionType().addType(new NumericType(HiveType.Kind.INT, 5, 0)),
new BooleanType()
).toString());
}
@Test
public void testNumericMerges() throws Exception {
assertEquals("smallint", JsonSchemaFinder.mergeType(
new NumericType(HiveType.Kind.BYTE, 2, 0),
new NumericType(HiveType.Kind.SHORT, 4, 0)
).toString());
assertEquals("int", JsonSchemaFinder.mergeType(
new NumericType(HiveType.Kind.INT, 6, 0),
new NumericType(HiveType.Kind.SHORT, 4, 0)
).toString());
assertEquals("bigint", JsonSchemaFinder.mergeType(
new NumericType(HiveType.Kind.INT, 6, 0),
new NumericType(HiveType.Kind.LONG, 10, 0)
).toString());
assertEquals("decimal(20,0)", JsonSchemaFinder.mergeType(
new NumericType(HiveType.Kind.SHORT, 4, 0),
new NumericType(HiveType.Kind.DECIMAL, 20, 0)
).toString());
assertEquals("float", JsonSchemaFinder.mergeType(
new NumericType(HiveType.Kind.FLOAT, 21, 4),
new NumericType(HiveType.Kind.DECIMAL, 20, 0)
).toString());
assertEquals("double", JsonSchemaFinder.mergeType(
new NumericType(HiveType.Kind.DOUBLE, 31, 4),
new NumericType(HiveType.Kind.DECIMAL, 20, 10)
).toString());
assertEquals("uniontype<decimal(30,10),string>", JsonSchemaFinder.mergeType(
new NumericType(HiveType.Kind.DECIMAL, 20, 10),
new StringType(HiveType.Kind.STRING)
).toString());
assertEquals("uniontype<struct<i:int>,smallint>", JsonSchemaFinder.mergeType(
new StructType().addField("i", new NumericType(HiveType.Kind.INT, 5, 0)),
new NumericType(HiveType.Kind.SHORT, 4, 0)
).toString());
assertEquals("uniontype<smallint,list<int>>", JsonSchemaFinder.mergeType(
new NumericType(HiveType.Kind.SHORT, 4, 0),
new ListType(new NumericType(HiveType.Kind.INT, 5, 0))
).toString());
assertEquals("uniontype<decimal(20,0),string>", JsonSchemaFinder.mergeType(
new UnionType()
.addType(new NumericType(HiveType.Kind.INT, 5, 0))
.addType(new StringType(HiveType.Kind.STRING)),
new NumericType(HiveType.Kind.DECIMAL, 20, 0)
).toString());
}
@Test
public void testStringMerges() throws Exception {
assertEquals("string", JsonSchemaFinder.mergeType(
new StringType(HiveType.Kind.BINARY),
new StringType(HiveType.Kind.STRING)
).toString());
assertEquals("string", JsonSchemaFinder.mergeType(
new StringType(HiveType.Kind.STRING),
new StringType(HiveType.Kind.TIMESTAMP)
).toString());
assertEquals("uniontype<struct<i:int>,timestamp>", JsonSchemaFinder.mergeType(
new StructType().addField("i", new NumericType(HiveType.Kind.INT, 5, 0)),
new StringType(HiveType.Kind.TIMESTAMP)
).toString());
assertEquals("uniontype<binary,list<int>>", JsonSchemaFinder.mergeType(
new StringType(HiveType.Kind.BINARY),
new ListType(new NumericType(HiveType.Kind.INT, 5, 0))
).toString());
assertEquals("uniontype<int,string>", JsonSchemaFinder.mergeType(
new UnionType()
.addType(new NumericType(HiveType.Kind.INT, 5, 0))
.addType(new StringType(HiveType.Kind.STRING)),
new StringType(HiveType.Kind.TIMESTAMP)
).toString());
}
@Test
public void testListMerges() throws Exception {
assertEquals("list<bigint>", JsonSchemaFinder.mergeType(
new ListType(new NumericType(HiveType.Kind.INT, 10, 0)),
new ListType(new NumericType(HiveType.Kind.LONG, 20, 0))
).toString());
assertEquals("list<uniontype<int,string>>", JsonSchemaFinder.mergeType(
new ListType(new NumericType(HiveType.Kind.INT, 10, 0)),
new ListType(new StringType(HiveType.Kind.STRING))
).toString());
assertEquals("uniontype<struct<foo:int>,list<int>>", JsonSchemaFinder.mergeType(
new StructType().addField("foo", new NumericType(HiveType.Kind.INT, 10, 0)),
new ListType(new NumericType(HiveType.Kind.INT, 5, 0))
).toString());
assertEquals("uniontype<int,string,list<boolean>>", JsonSchemaFinder.mergeType(
new UnionType()
.addType(new NumericType(HiveType.Kind.INT, 5, 0))
.addType(new StringType(HiveType.Kind.STRING)),
new ListType(new BooleanType())
).toString());
}
@Test
public void testStructMerges() throws Exception {
assertEquals("struct<bar:timestamp,foo:int>", JsonSchemaFinder.mergeType(
new StructType().addField("foo", new NumericType(HiveType.Kind.INT, 10, 0)),
new StructType().addField("bar", new StringType(HiveType.Kind.TIMESTAMP))
).toString());
assertEquals("struct<bar:string,foo:int>", JsonSchemaFinder.mergeType(
new StructType()
.addField("foo", new NumericType(HiveType.Kind.INT, 10, 0))
.addField("bar", new StringType(HiveType.Kind.BINARY)),
new StructType()
.addField("bar", new StringType(HiveType.Kind.TIMESTAMP))
).toString());
assertEquals("uniontype<int,string,struct<foo:boolean>>", JsonSchemaFinder.mergeType(
new UnionType()
.addType(new NumericType(HiveType.Kind.INT, 5, 0))
.addType(new StringType(HiveType.Kind.STRING)),
new StructType().addField("foo", new BooleanType())
).toString());
}
@Test
public void testUnionMerges() throws Exception {
assertEquals("uniontype<decimal(15,10),boolean,string>", JsonSchemaFinder.mergeType(
new UnionType()
.addType(new NumericType(HiveType.Kind.DECIMAL, 2, 10))
.addType(new BooleanType())
.addType(new StringType(HiveType.Kind.BINARY)),
new UnionType()
.addType(new StringType(HiveType.Kind.TIMESTAMP))
.addType(new NumericType(HiveType.Kind.INT, 5, 0))
).toString());
assertEquals("uniontype<int,binary,struct<bar:timestamp>>", JsonSchemaFinder.mergeType(
new UnionType()
.addType(new NumericType(HiveType.Kind.INT, 10, 0))
.addType(new StringType(HiveType.Kind.BINARY)),
new StructType()
.addField("bar", new StringType(HiveType.Kind.TIMESTAMP))
).toString());
assertEquals("uniontype<int,string>", JsonSchemaFinder.mergeType(
new UnionType()
.addType(new NumericType(HiveType.Kind.INT, 5, 0))
.addType(new StringType(HiveType.Kind.BINARY)),
new StringType(HiveType.Kind.TIMESTAMP)
).toString());
}
@Test
public void testMapMerges() throws Exception {
assertEquals("map<decimal(15,10),string>", JsonSchemaFinder.mergeType(
new MapType(new NumericType(HiveType.Kind.DECIMAL, 2, 10),
new StringType(HiveType.Kind.TIMESTAMP)),
new MapType(new NumericType(HiveType.Kind.INT, 5, 0),
new StringType(HiveType.Kind.BINARY))
).toString());
assertEquals("map<binary,timestamp>", JsonSchemaFinder.mergeType(
new MapType(new StringType(HiveType.Kind.BINARY), new StringType(HiveType.Kind.TIMESTAMP)),
new MapType(new StringType(HiveType.Kind.BINARY), new StringType(HiveType.Kind.TIMESTAMP))
).toString());
assertEquals("map<string,string>", JsonSchemaFinder.mergeType(
new MapType(new StringType(HiveType.Kind.BINARY), new StringType(HiveType.Kind.TIMESTAMP)),
new MapType(new StringType(HiveType.Kind.TIMESTAMP), new StringType(HiveType.Kind.BINARY))
).toString());
assertEquals("struct<bar:map<struct<i:decimal(15,10),j:string>,struct<k:boolean>>,foo:int>",
JsonSchemaFinder.mergeType(
new StructType()
.addField("bar", new MapType(
new StructType()
.addField("i", new NumericType(HiveType.Kind.INT, 5, 0))
.addField("j", new StringType(HiveType.Kind.BINARY)),
new StructType()
.addField("k", new BooleanType())))
.addField("foo", new NumericType(HiveType.Kind.INT, 5, 0)),
new StructType()
.addField("bar", new MapType(
new StructType()
.addField("i", new NumericType(HiveType.Kind.DECIMAL, 2, 10))
.addField("j", new StringType(HiveType.Kind.TIMESTAMP)),
new StructType()
.addField("k", new BooleanType())))
.addField("foo", new NumericType(HiveType.Kind.INT, 5, 0))
).toString());
}
}
| 17,516 | 43.346835 | 99 | java |
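A short sketch of schema inference with the pieces exercised above: pickType maps a parsed JSON element to an inferred type, mergeType reconciles two inferred types, and getSchema yields the ORC form. It assumes pickType returns the same HiveType hierarchy that mergeType consumes (as the getSchema() call in testNumbers suggests), and the same package and imports as the test.
final JsonObject rec1 = new JsonObject();
rec1.addProperty("id", new LazilyParsedNumber("12"));
rec1.addProperty("name", "alice");
final JsonObject rec2 = new JsonObject();
rec2.addProperty("id", new LazilyParsedNumber("9223372036854775"));
rec2.addProperty("name", "bob");
// Merge the per-record types into a single schema covering both records.
final HiveType merged = JsonSchemaFinder.mergeType(
    JsonSchemaFinder.pickType(rec1),
    JsonSchemaFinder.pickType(rec2));
System.out.println(merged);              // struct<id:bigint,name:string> (id widened to bigint)
System.out.println(merged.getSchema());  // equivalent ORC TypeDescription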
Ludii | Ludii-master/AI/src/decision_trees/classifiers/BinaryLeafNode.java | package decision_trees.classifiers;
import features.FeatureVector;
/**
* Leaf node in a feature-based binary decision tree, holding a single predicted probability.
*
* @author Dennis Soemers
*/
public class BinaryLeafNode extends DecisionTreeNode
{
//-------------------------------------------------------------------------
/** Predicted probability of being a top move */
protected final float prob;
//-------------------------------------------------------------------------
/**
* Constructor
* @param prob
*/
public BinaryLeafNode
(
final float prob
)
{
this.prob = prob;
}
//-------------------------------------------------------------------------
@Override
public float predict(final FeatureVector featureVector)
{
return prob;
}
//-------------------------------------------------------------------------
@Override
public metadata.ai.features.trees.classifiers.DecisionTreeNode toMetadataNode()
{
return new metadata.ai.features.trees.classifiers.BinaryLeaf(Float.valueOf(prob));
}
//-------------------------------------------------------------------------
}
| 1,114 | 20.862745 | 84 | java |
Ludii | Ludii-master/AI/src/decision_trees/classifiers/DecisionConditionNode.java | package decision_trees.classifiers;
import features.Feature;
import features.FeatureVector;
import features.aspatial.AspatialFeature;
import metadata.ai.features.trees.classifiers.If;
/**
* Decision (condition) node in a feature-based decision tree.
*
* @author Dennis Soemers
*/
public class DecisionConditionNode extends DecisionTreeNode
{
//-------------------------------------------------------------------------
/** The feature we want to evaluate (our condition) */
protected final Feature feature;
/** Node we should traverse to if feature is true */
protected final DecisionTreeNode trueNode;
/** Node we should traverse to if feature is false */
protected final DecisionTreeNode falseNode;
/** Index of the feature we look at in our feature set (may index into either aspatial or spatial features list) */
protected int featureIdx = -1;
//-------------------------------------------------------------------------
/**
* Constructor
* @param feature
* @param trueNode Node we should traverse to if feature is true
* @param falseNode Node we should traverse to if feature is false
*/
public DecisionConditionNode
(
final Feature feature,
final DecisionTreeNode trueNode,
final DecisionTreeNode falseNode
)
{
this.feature = feature;
this.trueNode = trueNode;
this.falseNode = falseNode;
}
/**
* Constructor
* @param feature
* @param trueNode Node we should traverse to if feature is true
* @param falseNode Node we should traverse to if feature is false
* @param featureIdx Index of the feature
*/
public DecisionConditionNode
(
final Feature feature,
final DecisionTreeNode trueNode,
final DecisionTreeNode falseNode,
final int featureIdx
)
{
this.feature = feature;
this.trueNode = trueNode;
this.falseNode = falseNode;
this.featureIdx = featureIdx;
}
//-------------------------------------------------------------------------
@Override
public float predict(final FeatureVector featureVector)
{
if (feature instanceof AspatialFeature)
{
if (featureVector.aspatialFeatureValues().get(featureIdx) != 0.f)
return trueNode.predict(featureVector);
else
return falseNode.predict(featureVector);
}
else
{
if (featureVector.activeSpatialFeatureIndices().contains(featureIdx))
return trueNode.predict(featureVector);
else
return falseNode.predict(featureVector);
}
}
//-------------------------------------------------------------------------
@Override
public metadata.ai.features.trees.classifiers.DecisionTreeNode toMetadataNode()
{
return new If(feature.toString(), trueNode.toMetadataNode(), falseNode.toMetadataNode());
}
//-------------------------------------------------------------------------
/**
* @return The feature we check in this node
*/
public Feature feature()
{
return feature;
}
/**
* @return The node we go to when the feature is active
*/
public DecisionTreeNode trueNode()
{
return trueNode;
}
/**
* @return The node we go to when the feature is not active
*/
public DecisionTreeNode falseNode()
{
return falseNode;
}
//-------------------------------------------------------------------------
}
| 3,205 | 23.852713 | 116 | java |
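A hand-built sketch of how these two node types compose into a depth-one classifier: the condition node routes on a single feature and each branch is a BinaryLeafNode probability. The feature set, feature index, and feature vector are assumed to exist already (e.g. taken from a trained BaseFeatureSet); they are not constructed here.
// Hypothetical: featureSet, featureIdx and featureVector are assumed to come from elsewhere.
final Feature splitFeature = featureSet.spatialFeatures()[featureIdx];
final DecisionTreeNode tree =
    new DecisionConditionNode
    (
        splitFeature,
        new BinaryLeafNode(0.8f),   // predicted probability if the feature is active
        new BinaryLeafNode(0.2f),   // predicted probability if it is not
        featureIdx
    );
final float prob = tree.predict(featureVector);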
Ludii | Ludii-master/AI/src/decision_trees/classifiers/DecisionLeafNode.java | package decision_trees.classifiers;
import features.FeatureVector;
/**
* Leaf node in a feature-based decision tree, with probabilities for classes.
*
* @author Dennis Soemers
*/
public class DecisionLeafNode extends DecisionTreeNode
{
//-------------------------------------------------------------------------
/** Predicted probability of being a bottom-25% move */
protected final float bottom25Prob;
/** Predicted probability of being a move in the Interquartile Range */
protected final float iqrProb;
/** Predicted probability of being a top-25% move */
protected final float top25Prob;
//-------------------------------------------------------------------------
/**
* Constructor
* @param bottom25Prob
* @param iqrProb
* @param top25Prob
*/
public DecisionLeafNode
(
final float bottom25Prob,
final float iqrProb,
final float top25Prob
)
{
this.bottom25Prob = bottom25Prob;
this.iqrProb = iqrProb;
this.top25Prob = top25Prob;
}
//-------------------------------------------------------------------------
@Override
public float predict(final FeatureVector featureVector)
{
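// Combine the class probabilities into a single score: likely top-25% moves score high,
// discounted by the probability of being a bottom-25% move.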
return top25Prob * (1.f - bottom25Prob);
}
//-------------------------------------------------------------------------
@Override
public metadata.ai.features.trees.classifiers.DecisionTreeNode toMetadataNode()
{
return new metadata.ai.features.trees.classifiers.Leaf(Float.valueOf(bottom25Prob), Float.valueOf(iqrProb), Float.valueOf(top25Prob));
}
//-------------------------------------------------------------------------
}
| 1,588 | 24.222222 | 136 | java |
Ludii | Ludii-master/AI/src/decision_trees/classifiers/DecisionTreeNode.java | package decision_trees.classifiers;
import features.Feature;
import features.FeatureVector;
import features.feature_sets.BaseFeatureSet;
/**
* Abstract class for a node in a feature-based decision tree
* that should output class probabilities.
*
* @author Dennis Soemers
*/
public abstract class DecisionTreeNode
{
//-------------------------------------------------------------------------
/**
* @param featureVector
* @return Predicted (unnormalised) probability estimate for playing given feature vector
*/
public abstract float predict(final FeatureVector featureVector);
//-------------------------------------------------------------------------
/**
* Convert to tree in metadata format.
* @return Decision tree node.
*/
public abstract metadata.ai.features.trees.classifiers.DecisionTreeNode toMetadataNode();
//-------------------------------------------------------------------------
/**
* Constructs a node (and hence, tree) from the given metadata node.
* @param metadataNode
* @param featureSet
* @return Constructed node
*/
public static DecisionTreeNode fromMetadataNode
(
final metadata.ai.features.trees.classifiers.DecisionTreeNode metadataNode,
final BaseFeatureSet featureSet
)
{
if (metadataNode instanceof metadata.ai.features.trees.classifiers.If)
{
final metadata.ai.features.trees.classifiers.If ifNode = (metadata.ai.features.trees.classifiers.If) metadataNode;
final DecisionTreeNode thenBranch = fromMetadataNode(ifNode.thenNode(), featureSet);
final DecisionTreeNode elseBranch = fromMetadataNode(ifNode.elseNode(), featureSet);
final String featureString = ifNode.featureString();
final int featureIdx = featureSet.findFeatureIndexForString(featureString);
final Feature feature;
if (featureIdx < featureSet.aspatialFeatures().length)
{
if (featureSet.aspatialFeatures()[featureIdx].toString().equals(featureString))
feature = featureSet.aspatialFeatures()[featureIdx];
else
feature = featureSet.spatialFeatures()[featureIdx];
}
else
{
feature = featureSet.spatialFeatures()[featureIdx];
}
return new DecisionConditionNode(feature, thenBranch, elseBranch, featureIdx);
}
else if (metadataNode instanceof metadata.ai.features.trees.classifiers.BinaryLeaf)
{
final metadata.ai.features.trees.classifiers.BinaryLeaf leafNode = (metadata.ai.features.trees.classifiers.BinaryLeaf) metadataNode;
return new BinaryLeafNode(leafNode.prob());
}
else
{
final metadata.ai.features.trees.classifiers.Leaf leafNode = (metadata.ai.features.trees.classifiers.Leaf) metadataNode;
return new DecisionLeafNode(leafNode.bottom25Prob(), leafNode.iqrProb(), leafNode.top25Prob());
}
}
//-------------------------------------------------------------------------
}
| 2,834 | 32.75 | 135 | java |
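The metadata methods above allow a round trip between the in-memory tree and its metadata representation; a minimal sketch follows, in which the tree and featureSet are assumed to exist already.
// Serialise a learned tree into metadata form and rebuild it against the same feature set.
final metadata.ai.features.trees.classifiers.DecisionTreeNode meta = tree.toMetadataNode();
final DecisionTreeNode rebuilt = DecisionTreeNode.fromMetadataNode(meta, featureSet);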
Ludii | Ludii-master/AI/src/decision_trees/classifiers/ExperienceBinaryClassificationTreeLearner.java | package decision_trees.classifiers;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import features.Feature;
import features.FeatureVector;
import features.WeightVector;
import features.feature_sets.BaseFeatureSet;
import function_approx.LinearFunction;
import gnu.trove.list.array.TFloatArrayList;
import gnu.trove.list.array.TIntArrayList;
import main.collections.ArrayUtils;
import main.collections.FVector;
import training.expert_iteration.ExItExperience;
import utils.data_structures.experience_buffers.ExperienceBuffer;
/**
* Class with methods for learning binary classification trees from experience.
*
* @author Dennis Soemers
*/
public class ExperienceBinaryClassificationTreeLearner
{
//-------------------------------------------------------------------------
/**
* Builds a binary classification tree node for given feature set and experience buffer
* @param featureSet
* @param linFunc
* @param buffer
* @param maxDepth
* @param minSamplesPerLeaf
* @return Root node of the generated tree
*/
public static DecisionTreeNode buildTree
(
final BaseFeatureSet featureSet,
final LinearFunction linFunc,
final ExperienceBuffer buffer,
final int maxDepth,
final int minSamplesPerLeaf
)
{
final WeightVector oracleWeightVector = linFunc.effectiveParams();
final ExItExperience[] samples = buffer.allExperience();
final List<FeatureVector> allFeatureVectors = new ArrayList<FeatureVector>();
final TFloatArrayList allTargetLabels = new TFloatArrayList();
for (final ExItExperience sample : samples)
{
if (sample != null && sample.moves().size() > 1)
{
final FeatureVector[] featureVectors = sample.generateFeatureVectors(featureSet);
final float[] logits = new float[featureVectors.length];
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
logits[i] = oracleWeightVector.dot(featureVector);
}
final float maxLogit = ArrayUtils.max(logits);
final float minLogit = ArrayUtils.min(logits);
if (maxLogit == minLogit)
continue; // Nothing to learn from this, just skip it
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
allFeatureVectors.add(featureVector);
}
// Maximise logits for winning moves and minimise for losing moves
for (int i = sample.winningMoves().nextSetBit(0); i >= 0; i = sample.winningMoves().nextSetBit(i + 1))
{
logits[i] = maxLogit;
}
for (int i = sample.losingMoves().nextSetBit(0); i >= 0; i = sample.losingMoves().nextSetBit(i + 1))
{
logits[i] = minLogit;
}
final FVector policy = new FVector(logits);
policy.softmax();
final float maxProb = policy.max();
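// Rescale the softmax distribution so the best move maps to 1.0; the targets then lie in
// (0, 1] and serve as soft labels for the binary "top move" classifier.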
final float[] targets = new float[logits.length];
for (int i = 0; i < targets.length; ++i)
{
targets[i] = policy.get(i) / maxProb;
}
for (final float target : targets)
{
allTargetLabels.add(target);
}
}
}
return buildNode
(
featureSet,
allFeatureVectors,
allTargetLabels,
new BitSet(), new BitSet(),
featureSet.getNumAspatialFeatures(), featureSet.getNumSpatialFeatures(),
maxDepth,
minSamplesPerLeaf
);
}
//-------------------------------------------------------------------------
/**
* @param featureSet
* @param remainingFeatureVectors
* @param remainingTargetLabels
* @param alreadyPickedAspatials
* @param alreadyPickedSpatials
* @param numAspatialFeatures
* @param numSpatialFeatures
* @param allowedDepth
* @param minSamplesPerLeaf
* @return Newly built node for decision tree, for given data
*/
private static DecisionTreeNode buildNode
(
final BaseFeatureSet featureSet,
final List<FeatureVector> remainingFeatureVectors,
final TFloatArrayList remainingTargetLabels,
final BitSet alreadyPickedAspatials,
final BitSet alreadyPickedSpatials,
final int numAspatialFeatures,
final int numSpatialFeatures,
final int allowedDepth,
final int minSamplesPerLeaf
)
{
if (minSamplesPerLeaf <= 0)
throw new IllegalArgumentException("minSamplesPerLeaf must be greater than 0");
if (remainingFeatureVectors.isEmpty())
{
return new BinaryLeafNode(0.5f);
}
if (allowedDepth == 0)
{
// Have to create leaf node here
return new BinaryLeafNode(remainingTargetLabels.sum() / remainingTargetLabels.size());
}
// For every aspatial and every spatial feature, if not already picked, compute mean prob for true and false branches
final double[] sumProbsIfFalseAspatial = new double[numAspatialFeatures];
final int[] numFalseAspatial = new int[numAspatialFeatures];
final double[] sumProbsIfTrueAspatial = new double[numAspatialFeatures];
final int[] numTrueAspatial = new int[numAspatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (alreadyPickedAspatials.get(i))
continue;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
{
sumProbsIfTrueAspatial[i] += targetProb;
++numTrueAspatial[i];
}
else
{
sumProbsIfFalseAspatial[i] += targetProb;
++numFalseAspatial[i];
}
}
}
final double[] sumProbsIfFalseSpatial = new double[numSpatialFeatures];
final int[] numFalseSpatial = new int[numSpatialFeatures];
final double[] sumProbsIfTrueSpatial = new double[numSpatialFeatures];
final int[] numTrueSpatial = new int[numSpatialFeatures];
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
final FeatureVector featureVector = remainingFeatureVectors.get(i);
final float targetProb = remainingTargetLabels.getQuick(i);
final boolean[] active = new boolean[numSpatialFeatures];
final TIntArrayList sparseSpatials = featureVector.activeSpatialFeatureIndices();
for (int j = 0; j < sparseSpatials.size(); ++j)
{
active[sparseSpatials.getQuick(j)] = true;
}
for (int j = 0; j < active.length; ++j)
{
if (alreadyPickedSpatials.get(j))
continue;
if (active[j])
{
sumProbsIfTrueSpatial[j] += targetProb;
++numTrueSpatial[j];
}
else
{
sumProbsIfFalseSpatial[j] += targetProb;
++numFalseSpatial[j];
}
}
}
final double[] meanProbsIfFalseAspatial = new double[numAspatialFeatures];
final double[] meanProbsIfTrueAspatial = new double[numAspatialFeatures];
final double[] meanProbsIfFalseSpatial = new double[numSpatialFeatures];
final double[] meanProbsIfTrueSpatial = new double[numSpatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] > 0)
meanProbsIfFalseAspatial[i] = sumProbsIfFalseAspatial[i] / numFalseAspatial[i];
if (numTrueAspatial[i] > 0)
meanProbsIfTrueAspatial[i] = sumProbsIfTrueAspatial[i] / numTrueAspatial[i];
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] > 0)
meanProbsIfFalseSpatial[i] = sumProbsIfFalseSpatial[i] / numFalseSpatial[i];
if (numTrueSpatial[i] > 0)
meanProbsIfTrueSpatial[i] = sumProbsIfTrueSpatial[i] / numTrueSpatial[i];
}
// Find feature that maximally reduces sum of squared errors
double minSumSquaredErrors = Double.POSITIVE_INFINITY;
double maxSumSquaredErrors = Double.NEGATIVE_INFINITY;
int bestIdx = -1;
boolean bestFeatureIsAspatial = true;
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] < minSamplesPerLeaf || numTrueAspatial[i] < minSamplesPerLeaf)
continue;
double sumSquaredErrors = 0.0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
final double error;
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
error = targetProb - meanProbsIfTrueAspatial[i];
else
error = targetProb - meanProbsIfFalseAspatial[i];
sumSquaredErrors += (error * error);
}
if (sumSquaredErrors < minSumSquaredErrors)
{
minSumSquaredErrors = sumSquaredErrors;
bestIdx = i;
}
if (sumSquaredErrors > maxSumSquaredErrors)
{
maxSumSquaredErrors = sumSquaredErrors;
}
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] < minSamplesPerLeaf || numTrueSpatial[i] < minSamplesPerLeaf)
continue;
double sumSquaredErrors = 0.0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
final double error;
if (featureVector.activeSpatialFeatureIndices().contains(i))
error = targetProb - meanProbsIfTrueSpatial[i];
else
error = targetProb - meanProbsIfFalseSpatial[i];
sumSquaredErrors += (error * error);
}
if (sumSquaredErrors < minSumSquaredErrors)
{
minSumSquaredErrors = sumSquaredErrors;
bestIdx = i;
bestFeatureIsAspatial = false;
}
if (sumSquaredErrors > maxSumSquaredErrors)
{
maxSumSquaredErrors = sumSquaredErrors;
}
}
if (bestIdx == -1 || minSumSquaredErrors == 0.0 || minSumSquaredErrors == maxSumSquaredErrors)
{
// No point in making any split at all, so just make a leaf. TODO: could in theory use the remaining features to fit another model here.
return new BinaryLeafNode(remainingTargetLabels.sum() / remainingTargetLabels.size());
}
final Feature splittingFeature;
if (bestFeatureIsAspatial)
splittingFeature = featureSet.aspatialFeatures()[bestIdx];
else
splittingFeature = featureSet.spatialFeatures()[bestIdx];
final BitSet newAlreadyPickedAspatials;
final BitSet newAlreadyPickedSpatials;
if (bestFeatureIsAspatial)
{
newAlreadyPickedAspatials = (BitSet) alreadyPickedAspatials.clone();
newAlreadyPickedAspatials.set(bestIdx);
newAlreadyPickedSpatials = alreadyPickedSpatials;
}
else
{
newAlreadyPickedSpatials = (BitSet) alreadyPickedSpatials.clone();
newAlreadyPickedSpatials.set(bestIdx);
newAlreadyPickedAspatials = alreadyPickedAspatials;
}
// Split remaining data for the two branches
final List<FeatureVector> remainingFeatureVectorsTrue = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetProbsTrue = new TFloatArrayList();
final List<FeatureVector> remainingFeatureVectorsFalse = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetProbsFalse = new TFloatArrayList();
if (bestFeatureIsAspatial)
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).aspatialFeatureValues().get(bestIdx) != 0.f)
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetProbsTrue.add(remainingTargetLabels.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetProbsFalse.add(remainingTargetLabels.getQuick(i));
}
}
}
else
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).activeSpatialFeatureIndices().contains(bestIdx))
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetProbsTrue.add(remainingTargetLabels.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetProbsFalse.add(remainingTargetLabels.getQuick(i));
}
}
}
// Create the node for case where splitting feature is true
final DecisionTreeNode trueBranch;
{
trueBranch =
buildNode
(
featureSet,
remainingFeatureVectorsTrue,
remainingTargetProbsTrue,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
// Create the node for case where splitting feature is false
final DecisionTreeNode falseBranch;
{
falseBranch =
buildNode
(
featureSet,
remainingFeatureVectorsFalse,
remainingTargetProbsFalse,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
return new DecisionConditionNode(splittingFeature, trueBranch, falseBranch);
}
//-------------------------------------------------------------------------
}
| 12,750 | 28.792056 | 130 | java |
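A minimal sketch of invoking the learner; the feature set, linear function, and experience buffer are assumed to have been created or deserialised elsewhere, and the depth and leaf-size settings here are arbitrary.
// Hypothetical inputs: featureSet (BaseFeatureSet), linFunc (LinearFunction), buffer (ExperienceBuffer).
final DecisionTreeNode root =
    ExperienceBinaryClassificationTreeLearner.buildTree(featureSet, linFunc, buffer, 6, 10);
// The result can be exported through the metadata format shown earlier.
final metadata.ai.features.trees.classifiers.DecisionTreeNode meta = root.toMetadataNode();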
Ludii | Ludii-master/AI/src/decision_trees/classifiers/ExperienceIQRTreeLearner.java | package decision_trees.classifiers;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Comparator;
import java.util.List;
import features.Feature;
import features.FeatureVector;
import features.WeightVector;
import features.feature_sets.BaseFeatureSet;
import function_approx.LinearFunction;
import main.collections.ArrayUtils;
import main.math.MathRoutines;
import training.expert_iteration.ExItExperience;
import utils.data_structures.experience_buffers.ExperienceBuffer;
/**
* Class with methods for learning decision trees (classifiers) from experience.
*
* @author Dennis Soemers
*/
public class ExperienceIQRTreeLearner
{
//-------------------------------------------------------------------------
/**
* Classes we distinguish for IQR tree
*
* @author Dennis Soemers
*/
private static enum IQRClass
{
/** Should remain unused */
UNDEFINED,
/** Bottom 25% */
Bottom25,
/** Interquartile Range */
IQR,
/** Top 25% */
Top25
}
//-------------------------------------------------------------------------
/**
* Builds an IQR classification tree node for given feature set and experience buffer
* @param featureSet
* @param linFunc
* @param buffer
* @param maxDepth
* @param minSamplesPerLeaf
* @return Root node of the generated tree
*/
public static DecisionTreeNode buildTree
(
final BaseFeatureSet featureSet,
final LinearFunction linFunc,
final ExperienceBuffer buffer,
final int maxDepth,
final int minSamplesPerLeaf
)
{
final WeightVector oracleWeightVector = linFunc.effectiveParams();
final ExItExperience[] samples = buffer.allExperience();
final List<FeatureVector> allFeatureVectors = new ArrayList<FeatureVector>();
final List<IQRClass> allTargetClasses = new ArrayList<IQRClass>();
for (final ExItExperience sample : samples)
{
if (sample != null && sample.moves().size() > 1)
{
final FeatureVector[] featureVectors = sample.generateFeatureVectors(featureSet);
final float[] logits = new float[featureVectors.length];
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
logits[i] = oracleWeightVector.dot(featureVector);
}
final float maxLogit = ArrayUtils.max(logits);
final float minLogit = ArrayUtils.min(logits);
if (maxLogit == minLogit)
continue; // Nothing to learn from this, just skip it
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
allFeatureVectors.add(featureVector);
}
// Maximise logits for winning moves and minimise for losing moves
for (int i = sample.winningMoves().nextSetBit(0); i >= 0; i = sample.winningMoves().nextSetBit(i + 1))
{
logits[i] = maxLogit;
}
for (int i = sample.losingMoves().nextSetBit(0); i >= 0; i = sample.losingMoves().nextSetBit(i + 1))
{
logits[i] = minLogit;
}
final List<Integer> sortedIndices =
ArrayUtils.sortedIndices
(
featureVectors.length,
new Comparator<Integer>()
{
@Override
public int compare(final Integer i1, final Integer i2)
{
final float delta = logits[i1.intValue()] - logits[i2.intValue()];
if (delta < 0.f)
return -1;
if (delta > 0.f)
return 1;
return 0;
}
}
);
final int numBottom25 = (int) Math.max(1, Math.round(0.25 * featureVectors.length));
final int numTop25 = numBottom25;
final int numIQR = featureVectors.length - numBottom25 - numTop25;
float lowestTop25Logit = Float.POSITIVE_INFINITY;
float highestBottom25Logit = Float.NEGATIVE_INFINITY;
final IQRClass[] classes = new IQRClass[sortedIndices.size()];
for (int i = 0; i < numBottom25; ++i)
{
final float logit = logits[sortedIndices.get(i).intValue()];
classes[sortedIndices.get(i).intValue()] = IQRClass.Bottom25;
highestBottom25Logit = Math.max(highestBottom25Logit, logit);
}
for (int i = sortedIndices.size() - 1; i >= numBottom25 + numIQR; --i)
{
final float logit = logits[sortedIndices.get(i).intValue()];
classes[sortedIndices.get(i).intValue()] = IQRClass.Top25;
lowestTop25Logit = Math.min(lowestTop25Logit, logit);
}
for (int i = numBottom25; i < numBottom25 + numIQR; ++i)
{
final float logit = logits[sortedIndices.get(i).intValue()];
if (logit == lowestTop25Logit)
classes[sortedIndices.get(i).intValue()] = IQRClass.Top25;
else if (logit == highestBottom25Logit)
classes[sortedIndices.get(i).intValue()] = IQRClass.Bottom25;
else
classes[sortedIndices.get(i).intValue()] = IQRClass.IQR;
}
if (lowestTop25Logit == highestBottom25Logit)
{
// Top 25% and Bottom 25% logits overlap, so shrink those two buckets
// and instead have a greater IQR
for (int i = 0; i < sortedIndices.size(); ++i)
{
final float logit = logits[sortedIndices.get(i).intValue()];
if (logit == lowestTop25Logit)
classes[sortedIndices.get(i).intValue()] = IQRClass.IQR;
}
}
for (final IQRClass targetClass : classes)
{
allTargetClasses.add(targetClass);
}
}
}
return buildNode
(
featureSet,
allFeatureVectors,
allTargetClasses,
new BitSet(), new BitSet(),
featureSet.getNumAspatialFeatures(), featureSet.getNumSpatialFeatures(),
maxDepth,
minSamplesPerLeaf
);
}
//-------------------------------------------------------------------------
/**
* @param featureSet
* @param remainingFeatureVectors
* @param remainingTargetClasses
* @param alreadyPickedAspatials
* @param alreadyPickedSpatials
* @param numAspatialFeatures
* @param numSpatialFeatures
* @param allowedDepth
* @param minSamplesPerLeaf
* @return Newly built node for decision tree, for given data
*/
private static DecisionTreeNode buildNode
(
final BaseFeatureSet featureSet,
final List<FeatureVector> remainingFeatureVectors,
final List<IQRClass> remainingTargetClasses,
final BitSet alreadyPickedAspatials,
final BitSet alreadyPickedSpatials,
final int numAspatialFeatures,
final int numSpatialFeatures,
final int allowedDepth,
final int minSamplesPerLeaf
)
{
if (minSamplesPerLeaf <= 0)
throw new IllegalArgumentException("minSamplesPerLeaf must be greater than 0");
if (remainingFeatureVectors.isEmpty())
{
// This should probably never happen
System.err.println("Empty list of remaining feature vectors!");
return new DecisionLeafNode(1.f / 3, 1.f / 3, 1.f / 3);
}
int numBottom25 = 0;
int numTop25 = 0;
for (final IQRClass iqrClass : remainingTargetClasses)
{
if (iqrClass == IQRClass.Bottom25)
++numBottom25;
else if (iqrClass == IQRClass.Top25)
++numTop25;
}
final float probBottom25 = ((float)numBottom25) / remainingTargetClasses.size();
final float probTop25 = ((float)numTop25) / remainingTargetClasses.size();
final float probIQR = 1.f - probBottom25 - probTop25;
if (allowedDepth == 0)
{
// Have to create leaf node here
return new DecisionLeafNode(probBottom25, probIQR, probTop25);
}
double entropyBeforeSplit = 0.0;
if (probBottom25 > 0.f)
entropyBeforeSplit -= probBottom25 * MathRoutines.log2(probBottom25);
if (probTop25 > 0.f)
entropyBeforeSplit -= probTop25 * MathRoutines.log2(probTop25);
if (probIQR > 0.f)
entropyBeforeSplit -= probIQR * MathRoutines.log2(probIQR);
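// Shannon entropy of the class distribution: H = -sum_c p_c * log2(p_c).
// For example, class probabilities (0.25, 0.5, 0.25) give H = 1.5 bits.
// Each candidate split below is then scored by its information gain:
// gain = H(parent) - [P(false) * H(false branch) + P(true) * H(true branch)].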
// Find feature with maximum information gain
double maxInformationGain = Double.NEGATIVE_INFINITY;
double minInformationGain = Double.POSITIVE_INFINITY;
int bestIdx = -1;
boolean bestFeatureIsAspatial = true;
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (alreadyPickedAspatials.get(i))
continue;
int numBottom25IfFalse = 0;
int numIQRIfFalse = 0;
int numTop25IfFalse = 0;
int numBottom25IfTrue = 0;
int numIQRIfTrue = 0;
int numTop25IfTrue = 0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final IQRClass iqrClass = remainingTargetClasses.get(j);
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
{
switch (iqrClass)
{
case Bottom25:
++numBottom25IfTrue;
break;
case IQR:
++numIQRIfTrue;
break;
case Top25:
++numTop25IfTrue;
break;
//$CASES-OMITTED$
default:
System.err.println("Unrecognised IQR class!");
}
}
else
{
switch (iqrClass)
{
case Bottom25:
++numBottom25IfFalse;
break;
case IQR:
++numIQRIfFalse;
break;
case Top25:
++numTop25IfFalse;
break;
//$CASES-OMITTED$
default:
System.err.println("Unrecognised IQR class!");
}
}
}
final int totalNumFalse = numBottom25IfFalse + numIQRIfFalse + numTop25IfFalse;
final int totalNumTrue = numBottom25IfTrue + numIQRIfTrue + numTop25IfTrue;
if (totalNumFalse < minSamplesPerLeaf || totalNumTrue < minSamplesPerLeaf)
continue;
final double probBottom25IfFalse = ((double)numBottom25IfFalse) / totalNumFalse;
final double probIQRIfFalse = ((double)numIQRIfFalse) / totalNumFalse;
final double probTop25IfFalse = ((double)numTop25IfFalse) / totalNumFalse;
final double probBottom25IfTrue = ((double)numBottom25IfTrue) / totalNumTrue;
final double probIQRIfTrue = ((double)numIQRIfTrue) / totalNumTrue;
final double probTop25IfTrue = ((double)numTop25IfTrue) / totalNumTrue;
double entropyFalseBranch = 0.0;
if (probBottom25IfFalse > 0.f)
entropyFalseBranch -= probBottom25IfFalse * MathRoutines.log2(probBottom25IfFalse);
if (probIQRIfFalse > 0.f)
entropyFalseBranch -= probIQRIfFalse * MathRoutines.log2(probIQRIfFalse);
if (probTop25IfFalse > 0.f)
entropyFalseBranch -= probTop25IfFalse * MathRoutines.log2(probTop25IfFalse);
double entropyTrueBranch = 0.0;
if (probBottom25IfTrue > 0.f)
entropyTrueBranch -= probBottom25IfTrue * MathRoutines.log2(probBottom25IfTrue);
if (probIQRIfTrue > 0.f)
entropyTrueBranch -= probIQRIfTrue * MathRoutines.log2(probIQRIfTrue);
if (probTop25IfTrue > 0.f)
entropyTrueBranch -= probTop25IfTrue * MathRoutines.log2(probTop25IfTrue);
final double probFalse = ((double)totalNumFalse) / (totalNumFalse + totalNumTrue);
final double probTrue = 1.0 - probFalse;
final double informationGain = entropyBeforeSplit - probFalse * entropyFalseBranch - probTrue * entropyTrueBranch;
if (informationGain > maxInformationGain)
{
maxInformationGain = informationGain;
bestIdx = i;
}
if (informationGain < minInformationGain)
{
minInformationGain = informationGain;
}
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (alreadyPickedSpatials.get(i))
continue;
int numBottom25IfFalse = 0;
int numIQRIfFalse = 0;
int numTop25IfFalse = 0;
int numBottom25IfTrue = 0;
int numIQRIfTrue = 0;
int numTop25IfTrue = 0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final IQRClass iqrClass = remainingTargetClasses.get(j);
if (featureVector.activeSpatialFeatureIndices().contains(i))
{
switch (iqrClass)
{
case Bottom25:
++numBottom25IfTrue;
break;
case IQR:
++numIQRIfTrue;
break;
case Top25:
++numTop25IfTrue;
break;
//$CASES-OMITTED$
default:
System.err.println("Unrecognised IQR class!");
}
}
else
{
switch (iqrClass)
{
case Bottom25:
++numBottom25IfFalse;
break;
case IQR:
++numIQRIfFalse;
break;
case Top25:
++numTop25IfFalse;
break;
//$CASES-OMITTED$
default:
System.err.println("Unrecognised IQR class!");
}
}
}
final int totalNumFalse = numBottom25IfFalse + numIQRIfFalse + numTop25IfFalse;
final int totalNumTrue = numBottom25IfTrue + numIQRIfTrue + numTop25IfTrue;
if (totalNumFalse < minSamplesPerLeaf || totalNumTrue < minSamplesPerLeaf)
continue;
final double probBottom25IfFalse = ((double)numBottom25IfFalse) / totalNumFalse;
final double probIQRIfFalse = ((double)numIQRIfFalse) / totalNumFalse;
final double probTop25IfFalse = ((double)numTop25IfFalse) / totalNumFalse;
final double probBottom25IfTrue = ((double)numBottom25IfTrue) / totalNumTrue;
final double probIQRIfTrue = ((double)numIQRIfTrue) / totalNumTrue;
final double probTop25IfTrue = ((double)numTop25IfTrue) / totalNumTrue;
double entropyFalseBranch = 0.0;
if (probBottom25IfFalse > 0.f)
entropyFalseBranch -= probBottom25IfFalse * MathRoutines.log2(probBottom25IfFalse);
if (probIQRIfFalse > 0.f)
entropyFalseBranch -= probIQRIfFalse * MathRoutines.log2(probIQRIfFalse);
if (probTop25IfFalse > 0.f)
entropyFalseBranch -= probTop25IfFalse * MathRoutines.log2(probTop25IfFalse);
double entropyTrueBranch = 0.0;
if (probBottom25IfTrue > 0.f)
entropyTrueBranch -= probBottom25IfTrue * MathRoutines.log2(probBottom25IfTrue);
if (probIQRIfTrue > 0.f)
entropyTrueBranch -= probIQRIfTrue * MathRoutines.log2(probIQRIfTrue);
if (probTop25IfTrue > 0.f)
entropyTrueBranch -= probTop25IfTrue * MathRoutines.log2(probTop25IfTrue);
final double probFalse = ((double)totalNumFalse) / (totalNumFalse + totalNumTrue);
final double probTrue = 1.0 - probFalse;
final double informationGain = entropyBeforeSplit - probFalse * entropyFalseBranch - probTrue * entropyTrueBranch;
if (informationGain > maxInformationGain)
{
maxInformationGain = informationGain;
bestIdx = i;
bestFeatureIsAspatial = false;
}
if (informationGain < minInformationGain)
{
minInformationGain = informationGain;
}
}
if (bestIdx == -1 || maxInformationGain == 0.0 || minInformationGain == maxInformationGain)
{
// No point in making any split at all, so just make a leaf
return new DecisionLeafNode(probBottom25, probIQR, probTop25);
}
final Feature splittingFeature;
if (bestFeatureIsAspatial)
splittingFeature = featureSet.aspatialFeatures()[bestIdx];
else
splittingFeature = featureSet.spatialFeatures()[bestIdx];
final BitSet newAlreadyPickedAspatials;
final BitSet newAlreadyPickedSpatials;
if (bestFeatureIsAspatial)
{
newAlreadyPickedAspatials = (BitSet) alreadyPickedAspatials.clone();
newAlreadyPickedAspatials.set(bestIdx);
newAlreadyPickedSpatials = alreadyPickedSpatials;
}
else
{
newAlreadyPickedSpatials = (BitSet) alreadyPickedSpatials.clone();
newAlreadyPickedSpatials.set(bestIdx);
newAlreadyPickedAspatials = alreadyPickedAspatials;
}
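// Mark the chosen feature as already picked so that deeper nodes on this branch never
// test it again: the feature tests are binary, so its value is fixed along the current
// path and splitting on it a second time could not separate the data any further.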
// Split remaining data for the two branches
final List<FeatureVector> remainingFeatureVectorsTrue = new ArrayList<FeatureVector>();
final List<IQRClass> remainingTargetClassesTrue = new ArrayList<IQRClass>();
final List<FeatureVector> remainingFeatureVectorsFalse = new ArrayList<FeatureVector>();
final List<IQRClass> remainingTargetClassesFalse = new ArrayList<IQRClass>();
if (bestFeatureIsAspatial)
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).aspatialFeatureValues().get(bestIdx) != 0.f)
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetClassesTrue.add(remainingTargetClasses.get(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetClassesFalse.add(remainingTargetClasses.get(i));
}
}
}
else
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).activeSpatialFeatureIndices().contains(bestIdx))
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetClassesTrue.add(remainingTargetClasses.get(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetClassesFalse.add(remainingTargetClasses.get(i));
}
}
}
// Create the node for case where splitting feature is true
final DecisionTreeNode trueBranch;
{
trueBranch =
buildNode
(
featureSet,
remainingFeatureVectorsTrue,
remainingTargetClassesTrue,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
// Create the node for case where splitting feature is false
final DecisionTreeNode falseBranch;
{
falseBranch =
buildNode
(
featureSet,
remainingFeatureVectorsFalse,
remainingTargetClassesFalse,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
return new DecisionConditionNode(splittingFeature, trueBranch, falseBranch);
}
//-------------------------------------------------------------------------
}
| 17,452 | 29.039587 | 117 | java |
Ludii | Ludii-master/AI/src/decision_trees/classifiers/ExperienceImbalancedBinaryClassificationTree2Learner.java | package decision_trees.classifiers;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import features.Feature;
import features.FeatureVector;
import features.WeightVector;
import features.feature_sets.BaseFeatureSet;
import function_approx.LinearFunction;
import gnu.trove.list.array.TFloatArrayList;
import gnu.trove.list.array.TIntArrayList;
import main.collections.ArrayUtils;
import main.collections.FVector;
import training.expert_iteration.ExItExperience;
import utils.data_structures.experience_buffers.ExperienceBuffer;
/**
* Class with methods for learning imbalanced binary classification trees from experience,
* where the "True" branch must always directly end in a leaf node.
*
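* Hypothetical usage sketch (the featureSet, linFunc and buffer variables are assumed to
* exist at the call site; the depth and leaf-size arguments are illustrative values only):
*
* final DecisionTreeNode root =
*     ExperienceImbalancedBinaryClassificationTree2Learner.buildTree(featureSet, linFunc, buffer, 10, 10);
*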
* @author Dennis Soemers
*/
public class ExperienceImbalancedBinaryClassificationTree2Learner
{
//-------------------------------------------------------------------------
/**
* Builds an imbalanced binary classification tree for the given feature set and experience buffer
* @param featureSet
* @param linFunc
* @param buffer
* @param maxDepth
* @param minSamplesPerLeaf
* @return Root node of the generated tree
*/
public static DecisionTreeNode buildTree
(
final BaseFeatureSet featureSet,
final LinearFunction linFunc,
final ExperienceBuffer buffer,
final int maxDepth,
final int minSamplesPerLeaf
)
{
final WeightVector oracleWeightVector = linFunc.effectiveParams();
final ExItExperience[] samples = buffer.allExperience();
final List<FeatureVector> allFeatureVectors = new ArrayList<FeatureVector>();
final TFloatArrayList allTargetLabels = new TFloatArrayList();
for (final ExItExperience sample : samples)
{
if (sample != null && sample.moves().size() > 1)
{
final FeatureVector[] featureVectors = sample.generateFeatureVectors(featureSet);
final float[] logits = new float[featureVectors.length];
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
logits[i] = oracleWeightVector.dot(featureVector);
}
final float maxLogit = ArrayUtils.max(logits);
final float minLogit = ArrayUtils.min(logits);
if (maxLogit == minLogit)
continue; // Nothing to learn from this, just skip it
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
allFeatureVectors.add(featureVector);
}
// Maximise logits for winning moves and minimise for losing moves
for (int i = sample.winningMoves().nextSetBit(0); i >= 0; i = sample.winningMoves().nextSetBit(i + 1))
{
logits[i] = maxLogit;
}
for (int i = sample.losingMoves().nextSetBit(0); i >= 0; i = sample.losingMoves().nextSetBit(i + 1))
{
logits[i] = minLogit;
}
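// Convert the (clamped) logits into target labels in (0, 1]: softmax turns them into a
// policy distribution over the moves, and dividing by the largest probability rescales
// that distribution so the best move gets target 1.0 and weaker moves proportionally less.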
final FVector policy = new FVector(logits);
policy.softmax();
final float maxProb = policy.max();
final float[] targets = new float[logits.length];
for (int i = 0; i < targets.length; ++i)
{
targets[i] = policy.get(i) / maxProb;
}
for (final float target : targets)
{
allTargetLabels.add(target);
}
}
}
return buildNode
(
featureSet,
allFeatureVectors,
allTargetLabels,
new BitSet(), new BitSet(),
featureSet.getNumAspatialFeatures(), featureSet.getNumSpatialFeatures(),
maxDepth,
minSamplesPerLeaf
);
}
//-------------------------------------------------------------------------
/**
* @param featureSet
* @param remainingFeatureVectors
* @param remainingTargetLabels
* @param alreadyPickedAspatials
* @param alreadyPickedSpatials
* @param numAspatialFeatures
* @param numSpatialFeatures
* @param allowedDepth
* @param minSamplesPerLeaf
* @return Newly built node for decision tree, for given data
*/
private static DecisionTreeNode buildNode
(
final BaseFeatureSet featureSet,
final List<FeatureVector> remainingFeatureVectors,
final TFloatArrayList remainingTargetLabels,
final BitSet alreadyPickedAspatials,
final BitSet alreadyPickedSpatials,
final int numAspatialFeatures,
final int numSpatialFeatures,
final int allowedDepth,
final int minSamplesPerLeaf
)
{
if (minSamplesPerLeaf <= 0)
throw new IllegalArgumentException("minSamplesPerLeaf must be greater than 0");
if (remainingFeatureVectors.isEmpty())
{
return new BinaryLeafNode(0.5f);
}
if (allowedDepth == 0)
{
// Have to create leaf node here
return new BinaryLeafNode(remainingTargetLabels.sum() / remainingTargetLabels.size());
}
// For every aspatial and every spatial feature, if not already picked, compute mean prob for true and false branches
final double[] sumProbsIfFalseAspatial = new double[numAspatialFeatures];
final int[] numFalseAspatial = new int[numAspatialFeatures];
final double[] sumProbsIfTrueAspatial = new double[numAspatialFeatures];
final int[] numTrueAspatial = new int[numAspatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (alreadyPickedAspatials.get(i))
continue;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
{
sumProbsIfTrueAspatial[i] += targetProb;
++numTrueAspatial[i];
}
else
{
sumProbsIfFalseAspatial[i] += targetProb;
++numFalseAspatial[i];
}
}
}
final double[] sumProbsIfFalseSpatial = new double[numSpatialFeatures];
final int[] numFalseSpatial = new int[numSpatialFeatures];
final double[] sumProbsIfTrueSpatial = new double[numSpatialFeatures];
final int[] numTrueSpatial = new int[numSpatialFeatures];
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
final FeatureVector featureVector = remainingFeatureVectors.get(i);
final float targetProb = remainingTargetLabels.getQuick(i);
final boolean[] active = new boolean[numSpatialFeatures];
final TIntArrayList sparseSpatials = featureVector.activeSpatialFeatureIndices();
for (int j = 0; j < sparseSpatials.size(); ++j)
{
active[sparseSpatials.getQuick(j)] = true;
}
for (int j = 0; j < active.length; ++j)
{
if (alreadyPickedSpatials.get(j))
continue;
if (active[j])
{
sumProbsIfTrueSpatial[j] += targetProb;
++numTrueSpatial[j];
}
else
{
sumProbsIfFalseSpatial[j] += targetProb;
++numFalseSpatial[j];
}
}
}
final double[] meanProbsIfFalseAspatial = new double[numAspatialFeatures];
final double[] meanProbsIfTrueAspatial = new double[numAspatialFeatures];
final double[] meanProbsIfFalseSpatial = new double[numSpatialFeatures];
final double[] meanProbsIfTrueSpatial = new double[numSpatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] > 0)
meanProbsIfFalseAspatial[i] = sumProbsIfFalseAspatial[i] / numFalseAspatial[i];
if (numTrueAspatial[i] > 0)
meanProbsIfTrueAspatial[i] = sumProbsIfTrueAspatial[i] / numTrueAspatial[i];
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] > 0)
meanProbsIfFalseSpatial[i] = sumProbsIfFalseSpatial[i] / numFalseSpatial[i];
if (numTrueSpatial[i] > 0)
meanProbsIfTrueSpatial[i] = sumProbsIfTrueSpatial[i] / numTrueSpatial[i];
}
// Find feature that maximally reduces sum of squared errors
double minSumSquaredErrors = Double.POSITIVE_INFINITY;
double maxSumSquaredErrors = Double.NEGATIVE_INFINITY;
int bestTrueBranchNumSamples = -1;
int bestIdx = -1;
boolean bestFeatureIsAspatial = true;
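// Standard regression-tree criterion: each branch predicts the mean target label of its
// samples, a sample's error is (target - branch mean), and the feature with the smallest
// total sum of squared errors wins. Ties are broken towards the feature whose "true"
// branch retains the most samples.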
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] < minSamplesPerLeaf || numTrueAspatial[i] < minSamplesPerLeaf)
continue;
double sumSquaredErrors = 0.0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
final double error;
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
error = targetProb - meanProbsIfTrueAspatial[i];
else
error = targetProb - meanProbsIfFalseAspatial[i];
sumSquaredErrors += (error * error);
}
if (sumSquaredErrors < minSumSquaredErrors)
{
minSumSquaredErrors = sumSquaredErrors;
bestIdx = i;
bestTrueBranchNumSamples = numTrueAspatial[i];
}
else if (sumSquaredErrors == minSumSquaredErrors && numTrueAspatial[i] > bestTrueBranchNumSamples)
{
bestIdx = i;
bestTrueBranchNumSamples = numTrueAspatial[i];
}
if (sumSquaredErrors > maxSumSquaredErrors)
{
maxSumSquaredErrors = sumSquaredErrors;
}
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] < minSamplesPerLeaf || numTrueSpatial[i] < minSamplesPerLeaf)
continue;
double sumSquaredErrors = 0.0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
final double error;
if (featureVector.activeSpatialFeatureIndices().contains(i))
error = targetProb - meanProbsIfTrueSpatial[i];
else
error = targetProb - meanProbsIfFalseSpatial[i];
sumSquaredErrors += (error * error);
}
if (sumSquaredErrors < minSumSquaredErrors)
{
minSumSquaredErrors = sumSquaredErrors;
bestIdx = i;
bestTrueBranchNumSamples = numTrueSpatial[i];
bestFeatureIsAspatial = false;
}
else if (sumSquaredErrors == minSumSquaredErrors && numTrueSpatial[i] > bestTrueBranchNumSamples)
{
bestIdx = i;
bestTrueBranchNumSamples = numTrueSpatial[i];
bestFeatureIsAspatial = false;
}
if (sumSquaredErrors > maxSumSquaredErrors)
{
maxSumSquaredErrors = sumSquaredErrors;
}
}
if (bestIdx == -1 || minSumSquaredErrors == maxSumSquaredErrors)
{
// No point in making any split at all, so just make a leaf. TODO: could in theory use the remaining features to compute a model again
return new BinaryLeafNode(remainingTargetLabels.sum() / remainingTargetLabels.size());
}
final Feature splittingFeature;
if (bestFeatureIsAspatial)
splittingFeature = featureSet.aspatialFeatures()[bestIdx];
else
splittingFeature = featureSet.spatialFeatures()[bestIdx];
final BitSet newAlreadyPickedAspatials;
final BitSet newAlreadyPickedSpatials;
if (bestFeatureIsAspatial)
{
newAlreadyPickedAspatials = (BitSet) alreadyPickedAspatials.clone();
newAlreadyPickedAspatials.set(bestIdx);
newAlreadyPickedSpatials = alreadyPickedSpatials;
}
else
{
newAlreadyPickedSpatials = (BitSet) alreadyPickedSpatials.clone();
newAlreadyPickedSpatials.set(bestIdx);
newAlreadyPickedAspatials = alreadyPickedAspatials;
}
// Split remaining data for the two branches
final List<FeatureVector> remainingFeatureVectorsTrue = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetProbsTrue = new TFloatArrayList();
final List<FeatureVector> remainingFeatureVectorsFalse = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetProbsFalse = new TFloatArrayList();
if (bestFeatureIsAspatial)
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).aspatialFeatureValues().get(bestIdx) != 0.f)
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetProbsTrue.add(remainingTargetLabels.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetProbsFalse.add(remainingTargetLabels.getQuick(i));
}
}
}
else
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).activeSpatialFeatureIndices().contains(bestIdx))
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetProbsTrue.add(remainingTargetLabels.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetProbsFalse.add(remainingTargetLabels.getQuick(i));
}
}
}
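// The true branch is built with an allowed depth of 0, so the feature-active side of every
// split terminates immediately in a leaf and only the false branch keeps growing. This is
// the "imbalanced" shape described in the class comment; the resulting tree effectively
// behaves like an ordered decision list over features.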
// Create the node for case where splitting feature is true
final DecisionTreeNode trueBranch;
{
trueBranch =
buildNode
(
featureSet,
remainingFeatureVectorsTrue,
remainingTargetProbsTrue,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
0, // Force immediately making a leaf
minSamplesPerLeaf
);
}
// Create the node for case where splitting feature is false
final DecisionTreeNode falseBranch;
{
falseBranch =
buildNode
(
featureSet,
remainingFeatureVectorsFalse,
remainingTargetProbsFalse,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
return new DecisionConditionNode(splittingFeature, trueBranch, falseBranch);
}
//-------------------------------------------------------------------------
}
| 13,361 | 29.162528 | 130 | java |
Ludii | Ludii-master/AI/src/decision_trees/classifiers/ExperienceImbalancedBinaryClassificationTreeLearner.java | package decision_trees.classifiers;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import features.Feature;
import features.FeatureVector;
import features.WeightVector;
import features.feature_sets.BaseFeatureSet;
import function_approx.LinearFunction;
import gnu.trove.list.array.TFloatArrayList;
import gnu.trove.list.array.TIntArrayList;
import main.collections.ArrayUtils;
import main.collections.FVector;
import training.expert_iteration.ExItExperience;
import utils.data_structures.experience_buffers.ExperienceBuffer;
/**
* Class with methods for learning imbalanced binary classification trees from experience,
* where the "True" branch must always directly end in a leaf node.
*
* @author Dennis Soemers
*/
public class ExperienceImbalancedBinaryClassificationTreeLearner
{
//-------------------------------------------------------------------------
/**
* Builds an imbalanced binary classification tree for the given feature set and experience buffer
* @param featureSet
* @param linFunc
* @param buffer
* @param maxDepth
* @param minSamplesPerLeaf
* @return Root node of the generated tree
*/
public static DecisionTreeNode buildTree
(
final BaseFeatureSet featureSet,
final LinearFunction linFunc,
final ExperienceBuffer buffer,
final int maxDepth,
final int minSamplesPerLeaf
)
{
final WeightVector oracleWeightVector = linFunc.effectiveParams();
final ExItExperience[] samples = buffer.allExperience();
final List<FeatureVector> allFeatureVectors = new ArrayList<FeatureVector>();
final TFloatArrayList allTargetLabels = new TFloatArrayList();
for (final ExItExperience sample : samples)
{
if (sample != null && sample.moves().size() > 1)
{
final FeatureVector[] featureVectors = sample.generateFeatureVectors(featureSet);
final float[] logits = new float[featureVectors.length];
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
logits[i] = oracleWeightVector.dot(featureVector);
}
final float maxLogit = ArrayUtils.max(logits);
final float minLogit = ArrayUtils.min(logits);
if (maxLogit == minLogit)
continue; // Nothing to learn from this, just skip it
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
allFeatureVectors.add(featureVector);
}
// Maximise logits for winning moves and minimise for losing moves
for (int i = sample.winningMoves().nextSetBit(0); i >= 0; i = sample.winningMoves().nextSetBit(i + 1))
{
logits[i] = maxLogit;
}
for (int i = sample.losingMoves().nextSetBit(0); i >= 0; i = sample.losingMoves().nextSetBit(i + 1))
{
logits[i] = minLogit;
}
final FVector policy = new FVector(logits);
policy.softmax();
final float maxProb = policy.max();
final float[] targets = new float[logits.length];
for (int i = 0; i < targets.length; ++i)
{
targets[i] = policy.get(i) / maxProb;
}
for (final float target : targets)
{
allTargetLabels.add(target);
}
}
}
return buildNode
(
featureSet,
allFeatureVectors,
allTargetLabels,
new BitSet(), new BitSet(),
featureSet.getNumAspatialFeatures(), featureSet.getNumSpatialFeatures(),
maxDepth,
minSamplesPerLeaf
);
}
//-------------------------------------------------------------------------
/**
* @param featureSet
* @param remainingFeatureVectors
* @param remainingTargetLabels
* @param alreadyPickedAspatials
* @param alreadyPickedSpatials
* @param numAspatialFeatures
* @param numSpatialFeatures
* @param allowedDepth
* @param minSamplesPerLeaf
* @return Newly built node for decision tree, for given data
*/
private static DecisionTreeNode buildNode
(
final BaseFeatureSet featureSet,
final List<FeatureVector> remainingFeatureVectors,
final TFloatArrayList remainingTargetLabels,
final BitSet alreadyPickedAspatials,
final BitSet alreadyPickedSpatials,
final int numAspatialFeatures,
final int numSpatialFeatures,
final int allowedDepth,
final int minSamplesPerLeaf
)
{
if (minSamplesPerLeaf <= 0)
throw new IllegalArgumentException("minSamplesPerLeaf must be greater than 0");
if (remainingFeatureVectors.isEmpty())
{
return new BinaryLeafNode(0.5f);
}
if (allowedDepth == 0)
{
// Have to create leaf node here
return new BinaryLeafNode(remainingTargetLabels.sum() / remainingTargetLabels.size());
}
// For every aspatial and every spatial feature, if not already picked, compute mean prob for true and false branches
final double[] sumProbsIfFalseAspatial = new double[numAspatialFeatures];
final int[] numFalseAspatial = new int[numAspatialFeatures];
final double[] sumProbsIfTrueAspatial = new double[numAspatialFeatures];
final int[] numTrueAspatial = new int[numAspatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (alreadyPickedAspatials.get(i))
continue;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
{
sumProbsIfTrueAspatial[i] += targetProb;
++numTrueAspatial[i];
}
else
{
sumProbsIfFalseAspatial[i] += targetProb;
++numFalseAspatial[i];
}
}
}
final double[] sumProbsIfFalseSpatial = new double[numSpatialFeatures];
final int[] numFalseSpatial = new int[numSpatialFeatures];
final double[] sumProbsIfTrueSpatial = new double[numSpatialFeatures];
final int[] numTrueSpatial = new int[numSpatialFeatures];
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
final FeatureVector featureVector = remainingFeatureVectors.get(i);
final float targetProb = remainingTargetLabels.getQuick(i);
final boolean[] active = new boolean[numSpatialFeatures];
final TIntArrayList sparseSpatials = featureVector.activeSpatialFeatureIndices();
for (int j = 0; j < sparseSpatials.size(); ++j)
{
active[sparseSpatials.getQuick(j)] = true;
}
for (int j = 0; j < active.length; ++j)
{
if (alreadyPickedSpatials.get(j))
continue;
if (active[j])
{
sumProbsIfTrueSpatial[j] += targetProb;
++numTrueSpatial[j];
}
else
{
sumProbsIfFalseSpatial[j] += targetProb;
++numFalseSpatial[j];
}
}
}
final double[] meanProbsIfTrueAspatial = new double[numAspatialFeatures];
final double[] meanProbsIfTrueSpatial = new double[numSpatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numTrueAspatial[i] > 0)
meanProbsIfTrueAspatial[i] = sumProbsIfTrueAspatial[i] / numTrueAspatial[i];
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numTrueSpatial[i] > 0)
meanProbsIfTrueSpatial[i] = sumProbsIfTrueSpatial[i] / numTrueSpatial[i];
}
// Find feature that maximally reduces squared errors for true branch
double minTrueBranchSquaredErrors = Double.POSITIVE_INFINITY;
double maxTrueBranchSquaredErrors = Double.NEGATIVE_INFINITY;
int bestTrueBranchNumSamples = -1;
int bestIdx = -1;
boolean bestFeatureIsAspatial = true;
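// Only samples on the "true" branch contribute to the error here (false-branch errors are
// ignored), so the chosen feature is the one whose active samples are most homogeneous
// around their mean target. Ties are again broken towards the larger true-branch sample count.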
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] < minSamplesPerLeaf || numTrueAspatial[i] < minSamplesPerLeaf)
continue;
double trueBranchSquaredErrors = 0.0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
final double error;
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
error = targetProb - meanProbsIfTrueAspatial[i];
else
error = 0.0; // Ignore false branch
trueBranchSquaredErrors += (error * error);
}
if (trueBranchSquaredErrors < minTrueBranchSquaredErrors)
{
minTrueBranchSquaredErrors = trueBranchSquaredErrors;
bestIdx = i;
bestTrueBranchNumSamples = numTrueAspatial[i];
}
else if (trueBranchSquaredErrors == minTrueBranchSquaredErrors && numTrueAspatial[i] > bestTrueBranchNumSamples)
{
bestIdx = i;
bestTrueBranchNumSamples = numTrueAspatial[i];
}
if (trueBranchSquaredErrors > maxTrueBranchSquaredErrors)
{
maxTrueBranchSquaredErrors = trueBranchSquaredErrors;
}
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] < minSamplesPerLeaf || numTrueSpatial[i] < minSamplesPerLeaf)
continue;
double trueBranchSquaredErrors = 0.0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
final double error;
if (featureVector.activeSpatialFeatureIndices().contains(i))
error = targetProb - meanProbsIfTrueSpatial[i];
else
error = 0.0; // Ignore false branch
trueBranchSquaredErrors += (error * error);
}
if (trueBranchSquaredErrors < minTrueBranchSquaredErrors)
{
minTrueBranchSquaredErrors = trueBranchSquaredErrors;
bestIdx = i;
bestTrueBranchNumSamples = numTrueSpatial[i];
bestFeatureIsAspatial = false;
}
else if (trueBranchSquaredErrors == minTrueBranchSquaredErrors && numTrueSpatial[i] > bestTrueBranchNumSamples)
{
bestIdx = i;
bestTrueBranchNumSamples = numTrueSpatial[i];
}
if (trueBranchSquaredErrors > maxTrueBranchSquaredErrors)
{
maxTrueBranchSquaredErrors = trueBranchSquaredErrors;
}
}
if (bestIdx == -1 || minTrueBranchSquaredErrors == maxTrueBranchSquaredErrors)
{
// No point in making any split at all, so just make a leaf. TODO: could in theory use the remaining features to compute a model again
return new BinaryLeafNode(remainingTargetLabels.sum() / remainingTargetLabels.size());
}
final Feature splittingFeature;
if (bestFeatureIsAspatial)
splittingFeature = featureSet.aspatialFeatures()[bestIdx];
else
splittingFeature = featureSet.spatialFeatures()[bestIdx];
final BitSet newAlreadyPickedAspatials;
final BitSet newAlreadyPickedSpatials;
if (bestFeatureIsAspatial)
{
newAlreadyPickedAspatials = (BitSet) alreadyPickedAspatials.clone();
newAlreadyPickedAspatials.set(bestIdx);
newAlreadyPickedSpatials = alreadyPickedSpatials;
}
else
{
newAlreadyPickedSpatials = (BitSet) alreadyPickedSpatials.clone();
newAlreadyPickedSpatials.set(bestIdx);
newAlreadyPickedAspatials = alreadyPickedAspatials;
}
// Split remaining data for the two branches
final List<FeatureVector> remainingFeatureVectorsTrue = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetProbsTrue = new TFloatArrayList();
final List<FeatureVector> remainingFeatureVectorsFalse = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetProbsFalse = new TFloatArrayList();
if (bestFeatureIsAspatial)
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).aspatialFeatureValues().get(bestIdx) != 0.f)
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetProbsTrue.add(remainingTargetLabels.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetProbsFalse.add(remainingTargetLabels.getQuick(i));
}
}
}
else
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).activeSpatialFeatureIndices().contains(bestIdx))
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetProbsTrue.add(remainingTargetLabels.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetProbsFalse.add(remainingTargetLabels.getQuick(i));
}
}
}
// Create the node for case where splitting feature is true
final DecisionTreeNode trueBranch;
{
trueBranch =
buildNode
(
featureSet,
remainingFeatureVectorsTrue,
remainingTargetProbsTrue,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
0, // Force immediately making a leaf
minSamplesPerLeaf
);
}
// Create the node for case where splitting feature is false
final DecisionTreeNode falseBranch;
{
falseBranch =
buildNode
(
featureSet,
remainingFeatureVectorsFalse,
remainingTargetProbsFalse,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
return new DecisionConditionNode(splittingFeature, trueBranch, falseBranch);
}
//-------------------------------------------------------------------------
}
| 13,114 | 29.218894 | 130 | java |
Ludii | Ludii-master/AI/src/decision_trees/classifiers/ExperienceUrgencyTreeLearner.java | package decision_trees.classifiers;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import features.Feature;
import features.FeatureVector;
import features.WeightVector;
import features.feature_sets.BaseFeatureSet;
import function_approx.LinearFunction;
import gnu.trove.list.array.TFloatArrayList;
import gnu.trove.list.array.TIntArrayList;
import main.collections.ArrayUtils;
import main.collections.FVector;
import training.expert_iteration.ExItExperience;
import utils.data_structures.experience_buffers.ExperienceBuffer;
/**
* Class with methods for learning urgency trees from experience.
*
* @author Dennis Soemers
*/
public class ExperienceUrgencyTreeLearner
{
//-------------------------------------------------------------------------
/**
* Builds an urgency tree node for given feature set and experience buffer
* @param featureSet
* @param linFunc
* @param buffer
* @param maxDepth
* @param minSamplesPerLeaf
* @return Root node of the generated tree
*/
public static DecisionTreeNode buildTree
(
final BaseFeatureSet featureSet,
final LinearFunction linFunc,
final ExperienceBuffer buffer,
final int maxDepth,
final int minSamplesPerLeaf
)
{
final WeightVector oracleWeightVector = linFunc.effectiveParams();
final ExItExperience[] samples = buffer.allExperience();
final List<FeatureVector> allFeatureVectors = new ArrayList<FeatureVector>();
final TFloatArrayList allTargetLabels = new TFloatArrayList();
for (final ExItExperience sample : samples)
{
if (sample != null && sample.moves().size() > 1)
{
final FeatureVector[] featureVectors = sample.generateFeatureVectors(featureSet);
final float[] logits = new float[featureVectors.length];
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
logits[i] = oracleWeightVector.dot(featureVector);
}
final float maxLogit = ArrayUtils.max(logits);
final float minLogit = ArrayUtils.min(logits);
if (maxLogit == minLogit)
continue; // Nothing to learn from this, just skip it
for (int i = 0; i < featureVectors.length; ++i)
{
final FeatureVector featureVector = featureVectors[i];
allFeatureVectors.add(featureVector);
}
// Maximise logits for winning moves and minimise for losing moves
for (int i = sample.winningMoves().nextSetBit(0); i >= 0; i = sample.winningMoves().nextSetBit(i + 1))
{
logits[i] = maxLogit;
}
for (int i = sample.losingMoves().nextSetBit(0); i >= 0; i = sample.losingMoves().nextSetBit(i + 1))
{
logits[i] = minLogit;
}
final FVector policy = new FVector(logits);
policy.softmax();
final float maxProb = policy.max();
final float[] targets = new float[logits.length];
for (int i = 0; i < targets.length; ++i)
{
targets[i] = policy.get(i) / maxProb;
}
for (final float target : targets)
{
allTargetLabels.add(target);
}
}
}
return buildNode
(
featureSet,
allFeatureVectors,
allTargetLabels,
new BitSet(), new BitSet(),
featureSet.getNumAspatialFeatures(), featureSet.getNumSpatialFeatures(),
maxDepth,
minSamplesPerLeaf
);
}
//-------------------------------------------------------------------------
/**
* @param featureSet
* @param remainingFeatureVectors
* @param remainingTargetLabels
* @param alreadyPickedAspatials
* @param alreadyPickedSpatials
* @param numAspatialFeatures
* @param numSpatialFeatures
* @param allowedDepth
* @param minSamplesPerLeaf
* @return Newly built node for decision tree, for given data
*/
private static DecisionTreeNode buildNode
(
final BaseFeatureSet featureSet,
final List<FeatureVector> remainingFeatureVectors,
final TFloatArrayList remainingTargetLabels,
final BitSet alreadyPickedAspatials,
final BitSet alreadyPickedSpatials,
final int numAspatialFeatures,
final int numSpatialFeatures,
final int allowedDepth,
final int minSamplesPerLeaf
)
{
if (minSamplesPerLeaf <= 0)
throw new IllegalArgumentException("minSamplesPerLeaf must be greater than 0");
if (remainingFeatureVectors.isEmpty())
{
return new BinaryLeafNode(0.5f);
}
if (allowedDepth == 0)
{
// Have to create leaf node here
return new BinaryLeafNode(remainingTargetLabels.sum() / remainingTargetLabels.size());
}
// Compute baseline prob (mean for the full node that we want to split)
final double baselineProb = remainingTargetLabels.sum() / remainingTargetLabels.size();
// For every aspatial and every spatial feature, if not already picked, compute mean prob for true and false branches
final double[] sumProbsIfFalseAspatial = new double[numAspatialFeatures];
final int[] numFalseAspatial = new int[numAspatialFeatures];
final double[] sumProbsIfTrueAspatial = new double[numAspatialFeatures];
final int[] numTrueAspatial = new int[numAspatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (alreadyPickedAspatials.get(i))
continue;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetProb = remainingTargetLabels.getQuick(j);
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
{
sumProbsIfTrueAspatial[i] += targetProb;
++numTrueAspatial[i];
}
else
{
sumProbsIfFalseAspatial[i] += targetProb;
++numFalseAspatial[i];
}
}
}
final double[] sumProbsIfFalseSpatial = new double[numSpatialFeatures];
final int[] numFalseSpatial = new int[numSpatialFeatures];
final double[] sumProbsIfTrueSpatial = new double[numSpatialFeatures];
final int[] numTrueSpatial = new int[numSpatialFeatures];
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
final FeatureVector featureVector = remainingFeatureVectors.get(i);
final float targetProb = remainingTargetLabels.getQuick(i);
final boolean[] active = new boolean[numSpatialFeatures];
final TIntArrayList sparseSpatials = featureVector.activeSpatialFeatureIndices();
for (int j = 0; j < sparseSpatials.size(); ++j)
{
active[sparseSpatials.getQuick(j)] = true;
}
for (int j = 0; j < active.length; ++j)
{
if (alreadyPickedSpatials.get(j))
continue;
if (active[j])
{
sumProbsIfTrueSpatial[j] += targetProb;
++numTrueSpatial[j];
}
else
{
sumProbsIfFalseSpatial[j] += targetProb;
++numFalseSpatial[j];
}
}
}
final double[] meanProbsIfFalseAspatial = new double[numAspatialFeatures];
final double[] meanProbsIfTrueAspatial = new double[numAspatialFeatures];
final double[] meanProbsIfFalseSpatial = new double[numSpatialFeatures];
final double[] meanProbsIfTrueSpatial = new double[numSpatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] > 0)
meanProbsIfFalseAspatial[i] = sumProbsIfFalseAspatial[i] / numFalseAspatial[i];
if (numTrueAspatial[i] > 0)
meanProbsIfTrueAspatial[i] = sumProbsIfTrueAspatial[i] / numTrueAspatial[i];
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] > 0)
meanProbsIfFalseSpatial[i] = sumProbsIfFalseSpatial[i] / numFalseSpatial[i];
if (numTrueSpatial[i] > 0)
meanProbsIfTrueSpatial[i] = sumProbsIfTrueSpatial[i] / numTrueSpatial[i];
}
// Find features with maximum scaled urgency
final double negativeRange = baselineProb;
final double positiveRange = 1.0 - baselineProb;
double maxUrgency = 0.0;
final TIntArrayList bestIndices = new TIntArrayList();
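// "Scaled urgency" of a branch: how far the branch's mean target deviates from the node's
// baseline mean, normalised by the room available on that side of the baseline
// (baselineProb downwards, 1 - baselineProb upwards) so that deviations in either direction
// are comparable. A feature's urgency is the larger of its two branch urgencies.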
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] < minSamplesPerLeaf || numTrueAspatial[i] < minSamplesPerLeaf)
continue;
final double scaledUrgencyFalse;
if (meanProbsIfFalseAspatial[i] > baselineProb)
scaledUrgencyFalse = (meanProbsIfFalseAspatial[i] - baselineProb) / positiveRange;
else
scaledUrgencyFalse = (baselineProb - meanProbsIfFalseAspatial[i]) / negativeRange;
final double scaledUrgencyTrue;
if (meanProbsIfTrueAspatial[i] > baselineProb)
scaledUrgencyTrue = (meanProbsIfTrueAspatial[i] - baselineProb) / positiveRange;
else
scaledUrgencyTrue = (baselineProb - meanProbsIfTrueAspatial[i]) / negativeRange;
final double scaledUrgency = Math.max(scaledUrgencyFalse, scaledUrgencyTrue);
if (scaledUrgency > maxUrgency)
{
bestIndices.reset();
bestIndices.add(i);
maxUrgency = scaledUrgency;
}
else if (scaledUrgency == maxUrgency)
{
bestIndices.add(i);
}
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] < minSamplesPerLeaf || numTrueSpatial[i] < minSamplesPerLeaf)
continue;
final double scaledUrgencyFalse;
if (meanProbsIfFalseSpatial[i] > baselineProb)
scaledUrgencyFalse = (meanProbsIfFalseSpatial[i] - baselineProb) / positiveRange;
else
scaledUrgencyFalse = (baselineProb - meanProbsIfFalseSpatial[i]) / negativeRange;
final double scaledUrgencyTrue;
if (meanProbsIfTrueSpatial[i] > baselineProb)
scaledUrgencyTrue = (meanProbsIfTrueSpatial[i] - baselineProb) / positiveRange;
else
scaledUrgencyTrue = (baselineProb - meanProbsIfTrueSpatial[i]) / negativeRange;
final double scaledUrgency = Math.max(scaledUrgencyFalse, scaledUrgencyTrue);
if (scaledUrgency > maxUrgency)
{
bestIndices.reset();
bestIndices.add(i + numAspatialFeatures);
maxUrgency = scaledUrgency;
}
else if (scaledUrgency == maxUrgency)
{
bestIndices.add(i + numAspatialFeatures);
}
}
if (bestIndices.isEmpty() || maxUrgency == 0.0)
{
// No point in making any split at all, so just make a leaf. TODO: could in theory use the remaining features to compute a model again
return new BinaryLeafNode((float) baselineProb);
}
// Use sample size as tie-breaker
int bestSampleSize = 0;
Feature splittingFeature = null;
boolean bestFeatureIsAspatial = false;
int bestIdx = -1;
for (int i = 0; i < bestIndices.size(); ++i)
{
final int rawIdx = bestIndices.getQuick(i);
final boolean isAspatial = (rawIdx < numAspatialFeatures);
final int adjustedIdx = isAspatial ? rawIdx : rawIdx - numAspatialFeatures;
final int sampleSize;
if (isAspatial)
sampleSize = Math.min(numFalseAspatial[adjustedIdx], numTrueAspatial[adjustedIdx]);
else
sampleSize = Math.min(numFalseSpatial[adjustedIdx], numTrueSpatial[adjustedIdx]);
if (sampleSize > bestSampleSize)
{
bestSampleSize = sampleSize;
splittingFeature = isAspatial ? featureSet.aspatialFeatures()[adjustedIdx] : featureSet.spatialFeatures()[adjustedIdx];
bestFeatureIsAspatial = isAspatial;
bestIdx = adjustedIdx;
}
}
final BitSet newAlreadyPickedAspatials;
final BitSet newAlreadyPickedSpatials;
if (bestFeatureIsAspatial)
{
newAlreadyPickedAspatials = (BitSet) alreadyPickedAspatials.clone();
newAlreadyPickedAspatials.set(bestIdx);
newAlreadyPickedSpatials = alreadyPickedSpatials;
}
else
{
newAlreadyPickedSpatials = (BitSet) alreadyPickedSpatials.clone();
newAlreadyPickedSpatials.set(bestIdx);
newAlreadyPickedAspatials = alreadyPickedAspatials;
}
// Split remaining data for the two branches
final List<FeatureVector> remainingFeatureVectorsTrue = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetProbsTrue = new TFloatArrayList();
final List<FeatureVector> remainingFeatureVectorsFalse = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetProbsFalse = new TFloatArrayList();
if (bestFeatureIsAspatial)
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).aspatialFeatureValues().get(bestIdx) != 0.f)
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetProbsTrue.add(remainingTargetLabels.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetProbsFalse.add(remainingTargetLabels.getQuick(i));
}
}
}
else
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).activeSpatialFeatureIndices().contains(bestIdx))
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetProbsTrue.add(remainingTargetLabels.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetProbsFalse.add(remainingTargetLabels.getQuick(i));
}
}
}
// Create the node for case where splitting feature is true
final DecisionTreeNode trueBranch;
{
trueBranch =
buildNode
(
featureSet,
remainingFeatureVectorsTrue,
remainingTargetProbsTrue,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
// Create the node for case where splitting feature is false
final DecisionTreeNode falseBranch;
{
falseBranch =
buildNode
(
featureSet,
remainingFeatureVectorsFalse,
remainingTargetProbsFalse,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
return new DecisionConditionNode(splittingFeature, trueBranch, falseBranch);
}
//-------------------------------------------------------------------------
}
| 13,750 | 29.557778 | 130 | java |
Ludii | Ludii-master/AI/src/decision_trees/logits/ExactLogitTreeLearner.java | package decision_trees.logits;
import java.util.ArrayList;
import java.util.List;
import features.Feature;
import features.aspatial.AspatialFeature;
import features.aspatial.InterceptFeature;
import features.feature_sets.BaseFeatureSet;
import features.spatial.SpatialFeature;
import function_approx.LinearFunction;
import gnu.trove.list.array.TFloatArrayList;
import main.collections.FVector;
import main.collections.ListUtils;
/**
* Class with methods for learning an exact logit tree.
*
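* "Exact" means the tree reproduces the logits of the original linear function: whenever a
* split logically fixes the value of other features, their weights are folded into an
* accumulated intercept (if forced true) or dropped (if forced false), and each leaf keeps
* the still-undetermined features as a small linear model.
*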
* @author Dennis Soemers
*/
public class ExactLogitTreeLearner
{
//-------------------------------------------------------------------------
/**
* Builds an exact logit tree node for given feature set and linear function of weights
* @param featureSet
* @param linFunc
* @param maxDepth
* @return Root node of the generated tree
*/
public static LogitTreeNode buildTree(final BaseFeatureSet featureSet, final LinearFunction linFunc, final int maxDepth)
{
final List<AspatialFeature> aspatialFeatures = new ArrayList<AspatialFeature>(featureSet.aspatialFeatures().length);
for (final AspatialFeature aspatial : featureSet.aspatialFeatures())
{
aspatialFeatures.add(aspatial);
}
final List<SpatialFeature> spatialFeatures = new ArrayList<SpatialFeature>(featureSet.spatialFeatures().length);
for (final SpatialFeature spatial : featureSet.spatialFeatures())
{
spatialFeatures.add(spatial);
}
final FVector allWeights = linFunc.effectiveParams().allWeights();
final TFloatArrayList aspatialWeights = new TFloatArrayList(aspatialFeatures.size());
final TFloatArrayList spatialWeights = new TFloatArrayList(spatialFeatures.size());
for (int i = 0; i < allWeights.dim(); ++i)
{
if (i < aspatialFeatures.size())
aspatialWeights.add(allWeights.get(i));
else
spatialWeights.add(allWeights.get(i));
}
// Remove intercept features and collect accumulated intercept
float accumInterceptWeight = 0.f;
for (int i = aspatialFeatures.size() - 1; i >= 0; --i)
{
if (aspatialFeatures.get(i) instanceof InterceptFeature)
{
accumInterceptWeight += aspatialWeights.removeAt(i);
aspatialFeatures.remove(i);
}
}
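// Intercept features are always active, so their weights can simply be summed into a single
// constant that is carried down the tree rather than treated as splittable features.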
// Remove all 0-weight features
for (int i = aspatialFeatures.size() - 1; i >= 0; --i)
{
if (aspatialWeights.getQuick(i) == 0.f)
{
ListUtils.removeSwap(aspatialWeights, i);
ListUtils.removeSwap(aspatialFeatures, i);
}
}
for (int i = spatialFeatures.size() - 1; i >= 0; --i)
{
if (spatialWeights.getQuick(i) == 0.f)
{
ListUtils.removeSwap(spatialWeights, i);
ListUtils.removeSwap(spatialFeatures, i);
}
}
return buildNode(aspatialFeatures, aspatialWeights, spatialFeatures, spatialWeights, accumInterceptWeight, maxDepth);
}
/**
* Builds an exact logit tree node for given feature set and linear function of weights,
* using a naive approach that simply splits on the feature with the maximum absolute weight
* @param featureSet
* @param linFunc
* @param maxDepth
* @return Root node of the generated tree
*/
public static LogitTreeNode buildTreeNaiveMaxAbs(final BaseFeatureSet featureSet, final LinearFunction linFunc, final int maxDepth)
{
final List<AspatialFeature> aspatialFeatures = new ArrayList<AspatialFeature>(featureSet.aspatialFeatures().length);
for (final AspatialFeature aspatial : featureSet.aspatialFeatures())
{
aspatialFeatures.add(aspatial);
}
final List<SpatialFeature> spatialFeatures = new ArrayList<SpatialFeature>(featureSet.spatialFeatures().length);
for (final SpatialFeature spatial : featureSet.spatialFeatures())
{
spatialFeatures.add(spatial);
}
final FVector allWeights = linFunc.effectiveParams().allWeights();
final TFloatArrayList aspatialWeights = new TFloatArrayList(aspatialFeatures.size());
final TFloatArrayList spatialWeights = new TFloatArrayList(spatialFeatures.size());
for (int i = 0; i < allWeights.dim(); ++i)
{
if (i < aspatialFeatures.size())
aspatialWeights.add(allWeights.get(i));
else
spatialWeights.add(allWeights.get(i));
}
// Remove intercept features and collect accumulated intercept
float accumInterceptWeight = 0.f;
for (int i = aspatialFeatures.size() - 1; i >= 0; --i)
{
if (aspatialFeatures.get(i) instanceof InterceptFeature)
{
accumInterceptWeight += aspatialWeights.removeAt(i);
aspatialFeatures.remove(i);
}
}
// Remove all 0-weight features
for (int i = aspatialFeatures.size() - 1; i >= 0; --i)
{
if (aspatialWeights.getQuick(i) == 0.f)
{
ListUtils.removeSwap(aspatialWeights, i);
ListUtils.removeSwap(aspatialFeatures, i);
}
}
for (int i = spatialFeatures.size() - 1; i >= 0; --i)
{
if (spatialWeights.getQuick(i) == 0.f)
{
ListUtils.removeSwap(spatialWeights, i);
ListUtils.removeSwap(spatialFeatures, i);
}
}
return buildNodeNaiveMaxAbs(aspatialFeatures, aspatialWeights, spatialFeatures, spatialWeights, accumInterceptWeight, maxDepth);
}
//-------------------------------------------------------------------------
/**
* @param remainingAspatialFeatures
* @param remainingAspatialWeights
* @param remainingSpatialFeatures
* @param remainingSpatialWeights
* @param accumInterceptWeight
* @param allowedDepth
* @return Newly built node for logit tree, for given data
*/
private static LogitTreeNode buildNode
(
final List<AspatialFeature> remainingAspatialFeatures,
final TFloatArrayList remainingAspatialWeights,
final List<SpatialFeature> remainingSpatialFeatures,
final TFloatArrayList remainingSpatialWeights,
final float accumInterceptWeight,
final int allowedDepth
)
{
if (remainingAspatialFeatures.isEmpty() && remainingSpatialFeatures.isEmpty())
{
// Time to create leaf node: a model with just a single intercept feature
return new LogitModelNode(new Feature[] {InterceptFeature.instance()}, new float[] {accumInterceptWeight});
}
if (allowedDepth == 0)
{
// Have to create leaf node with remaining features
final int numModelFeatures = remainingAspatialFeatures.size() + remainingSpatialFeatures.size() + 1;
final Feature[] featuresArray = new Feature[numModelFeatures];
final float[] weightsArray = new float[numModelFeatures];
int nextIdx = 0;
// Start with intercept
featuresArray[nextIdx] = InterceptFeature.instance();
weightsArray[nextIdx++] = accumInterceptWeight;
// Now aspatial features
for (int i = 0; i < remainingAspatialFeatures.size(); ++i)
{
featuresArray[nextIdx] = remainingAspatialFeatures.get(i);
weightsArray[nextIdx++] = remainingAspatialWeights.getQuick(i);
}
// And finally spatial features
for (int i = 0; i < remainingSpatialFeatures.size(); ++i)
{
featuresArray[nextIdx] = remainingSpatialFeatures.get(i);
weightsArray[nextIdx++] = remainingSpatialWeights.getQuick(i);
}
return new LogitModelNode(featuresArray, weightsArray);
}
// Find optimal splitting feature. As optimal splitting criterion, we try to
// get the lowest average (between our two children) sum of absolute weight values
// in remaining non-intercept features.
float lowestScore = Float.POSITIVE_INFINITY;
int bestIdx = -1;
boolean bestFeatureIsAspatial = true;
float sumAllAbsWeights = 0.f;
for (int i = 0; i < remainingAspatialWeights.size(); ++i)
{
sumAllAbsWeights += Math.abs(remainingAspatialWeights.getQuick(i));
}
for (int i = 0; i < remainingSpatialWeights.size(); ++i)
{
sumAllAbsWeights += Math.abs(remainingSpatialWeights.getQuick(i));
}
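// Score each candidate split by how much absolute feature weight the two children would
// still have to model: a weight is "absorbed" (no longer counted) whenever the split
// logically fixes that feature's value on a branch. The split score is the average of the
// true- and false-branch remainders, and the lowest score wins.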
for (int i = 0; i < remainingAspatialFeatures.size(); ++i)
{
// Since we already filtered out intercept terms, we know that whenever any other
// aspatial feature is true, all the spatial features must be false (so then we
// can absorb all their weights at once). If an aspatial feature is false, we
// do not lose any other weights.
final float absFeatureWeight = Math.abs(remainingAspatialWeights.getQuick(i));
float falseScore = sumAllAbsWeights - absFeatureWeight;
float trueScore = sumAllAbsWeights - absFeatureWeight;
for (int j = 0; j < remainingSpatialWeights.size(); ++j)
{
trueScore -= Math.abs(remainingSpatialWeights.getQuick(j));
}
final float splitScore = (falseScore + trueScore) / 2.f;
if (splitScore < lowestScore)
{
lowestScore = splitScore;
bestIdx = i;
}
}
for (int i = 0; i < remainingSpatialFeatures.size(); ++i)
{
final SpatialFeature spatial = remainingSpatialFeatures.get(i);
final float absFeatureWeight = Math.abs(remainingSpatialWeights.getQuick(i));
float falseScore = sumAllAbsWeights - absFeatureWeight;
float trueScore = sumAllAbsWeights - absFeatureWeight;
// If a spatial feature is true, we lose all the aspatial weights (none of them can be true)
for (int j = 0; j < remainingAspatialWeights.size(); ++j)
{
trueScore -= Math.abs(remainingAspatialWeights.getQuick(j));
}
for (int j = 0; j < remainingSpatialFeatures.size(); ++j)
{
if (i == j)
continue;
final SpatialFeature otherFeature = remainingSpatialFeatures.get(j);
if (otherFeature.generalises(spatial))
{
// The other feature generalises the splitting candidate. This means
// that in the branch where the splitting candidate is true, this
// feature must also be true and we can therefore also absorb its
// weight.
final float otherAbsWeight = Math.abs(remainingSpatialWeights.getQuick(j));
trueScore -= otherAbsWeight;
}
if (spatial.generalises(otherFeature))
{
// The splitting candidate generalises the other feature. This means
// that in the branch where the splitting candidate is false, the other
// feature must also be false and we can therefore also absorb its
// weight.
final float otherAbsWeight = Math.abs(remainingSpatialWeights.getQuick(j));
falseScore -= otherAbsWeight;
}
}
final float splitScore = (falseScore + trueScore) / 2.f;
if (splitScore < lowestScore)
{
lowestScore = splitScore;
bestIdx = i;
bestFeatureIsAspatial = false;
}
}
final Feature splittingFeature;
if (bestFeatureIsAspatial)
splittingFeature = remainingAspatialFeatures.get(bestIdx);
else
splittingFeature = remainingSpatialFeatures.get(bestIdx);
// Create the node for case where splitting feature is true
final LogitTreeNode trueBranch;
{
final List<AspatialFeature> remainingAspatialsWhenTrue;
final TFloatArrayList remainingAspatialWeightsWhenTrue;
final List<SpatialFeature> remainingSpatialsWhenTrue;
final TFloatArrayList remainingSpatialWeightsWhenTrue;
float accumInterceptWhenTrue = accumInterceptWeight;
if (bestFeatureIsAspatial)
{
// Remove the aspatial feature that we split on
remainingAspatialsWhenTrue = new ArrayList<AspatialFeature>(remainingAspatialFeatures);
remainingAspatialWeightsWhenTrue = new TFloatArrayList(remainingAspatialWeights);
ListUtils.removeSwap(remainingAspatialsWhenTrue, bestIdx);
accumInterceptWhenTrue += remainingAspatialWeightsWhenTrue.getQuick(bestIdx);
ListUtils.removeSwap(remainingAspatialWeightsWhenTrue, bestIdx);
// Remove all spatial features when an aspatial feature is true
remainingSpatialsWhenTrue = new ArrayList<SpatialFeature>();
remainingSpatialWeightsWhenTrue = new TFloatArrayList();
}
else
{
// Remove all the aspatial features if a spatial feature is true
remainingAspatialsWhenTrue = new ArrayList<AspatialFeature>();
remainingAspatialWeightsWhenTrue = new TFloatArrayList();
// Remove all spatial features that are more general than our splitting feature + the splitting feature
remainingSpatialsWhenTrue = new ArrayList<SpatialFeature>(remainingSpatialFeatures);
remainingSpatialWeightsWhenTrue = new TFloatArrayList(remainingSpatialWeights);
for (int i = remainingSpatialsWhenTrue.size() - 1; i >= 0; --i)
{
if (i == bestIdx)
{
ListUtils.removeSwap(remainingSpatialsWhenTrue, i);
accumInterceptWhenTrue += remainingSpatialWeightsWhenTrue.getQuick(i);
ListUtils.removeSwap(remainingSpatialWeightsWhenTrue, i);
}
else
{
final SpatialFeature other = remainingSpatialsWhenTrue.get(i);
if (other.generalises((SpatialFeature)splittingFeature))
{
ListUtils.removeSwap(remainingSpatialsWhenTrue, i);
accumInterceptWhenTrue += remainingSpatialWeightsWhenTrue.getQuick(i);
ListUtils.removeSwap(remainingSpatialWeightsWhenTrue, i);
}
}
}
}
trueBranch =
buildNode
(
remainingAspatialsWhenTrue,
remainingAspatialWeightsWhenTrue,
remainingSpatialsWhenTrue,
remainingSpatialWeightsWhenTrue,
accumInterceptWhenTrue,
allowedDepth - 1
);
}
// Create the node for case where splitting feature is false
final LogitTreeNode falseBranch;
{
final List<AspatialFeature> remainingAspatialsWhenFalse;
final TFloatArrayList remainingAspatialWeightsWhenFalse;
final List<SpatialFeature> remainingSpatialsWhenFalse;
final TFloatArrayList remainingSpatialWeightsWhenFalse;
float accumInterceptWhenFalse = accumInterceptWeight;
if (bestFeatureIsAspatial)
{
// Remove the aspatial feature that we split on
remainingAspatialsWhenFalse = new ArrayList<AspatialFeature>(remainingAspatialFeatures);
remainingAspatialWeightsWhenFalse = new TFloatArrayList(remainingAspatialWeights);
ListUtils.removeSwap(remainingAspatialsWhenFalse, bestIdx);
ListUtils.removeSwap(remainingAspatialWeightsWhenFalse, bestIdx);
// Keep all spatial features when an aspatial feature is false
remainingSpatialsWhenFalse = new ArrayList<SpatialFeature>(remainingSpatialFeatures);
remainingSpatialWeightsWhenFalse = new TFloatArrayList(remainingSpatialWeights);
}
else
{
// Keep all the aspatial features if a spatial feature is false
remainingAspatialsWhenFalse = new ArrayList<AspatialFeature>(remainingAspatialFeatures);
remainingAspatialWeightsWhenFalse = new TFloatArrayList(remainingAspatialWeights);
// Remove all spatial features that are generalised by our splitting feature + the splitting feature
remainingSpatialsWhenFalse = new ArrayList<SpatialFeature>(remainingSpatialFeatures);
remainingSpatialWeightsWhenFalse = new TFloatArrayList(remainingSpatialWeights);
for (int i = remainingSpatialsWhenFalse.size() - 1; i >= 0; --i)
{
if (i == bestIdx)
{
ListUtils.removeSwap(remainingSpatialsWhenFalse, i);
ListUtils.removeSwap(remainingSpatialWeightsWhenFalse, i);
}
else
{
final SpatialFeature other = remainingSpatialsWhenFalse.get(i);
if (((SpatialFeature)splittingFeature).generalises(other))
{
ListUtils.removeSwap(remainingSpatialsWhenFalse, i);
ListUtils.removeSwap(remainingSpatialWeightsWhenFalse, i);
}
}
}
}
falseBranch =
buildNode
(
remainingAspatialsWhenFalse,
remainingAspatialWeightsWhenFalse,
remainingSpatialsWhenFalse,
remainingSpatialWeightsWhenFalse,
accumInterceptWhenFalse,
allowedDepth - 1
);
}
return new LogitDecisionNode(splittingFeature, trueBranch, falseBranch);
}
/**
* Uses naive approach of splitting on features with max absolute weight.
*
* @param remainingAspatialFeatures
* @param remainingAspatialWeights
* @param remainingSpatialFeatures
* @param remainingSpatialWeights
* @param accumInterceptWeight
* @param allowedDepth
* @return Newly built node for logit tree, for given data
*/
private static LogitTreeNode buildNodeNaiveMaxAbs
(
final List<AspatialFeature> remainingAspatialFeatures,
final TFloatArrayList remainingAspatialWeights,
final List<SpatialFeature> remainingSpatialFeatures,
final TFloatArrayList remainingSpatialWeights,
final float accumInterceptWeight,
final int allowedDepth
)
{
if (remainingAspatialFeatures.isEmpty() && remainingSpatialFeatures.isEmpty())
{
// Time to create leaf node: a model with just a single intercept feature
return new LogitModelNode(new Feature[] {InterceptFeature.instance()}, new float[] {accumInterceptWeight});
}
if (allowedDepth == 0)
{
// Have to create leaf node with remaining features
final int numModelFeatures = remainingAspatialFeatures.size() + remainingSpatialFeatures.size() + 1;
final Feature[] featuresArray = new Feature[numModelFeatures];
final float[] weightsArray = new float[numModelFeatures];
int nextIdx = 0;
// Start with intercept
featuresArray[nextIdx] = InterceptFeature.instance();
weightsArray[nextIdx++] = accumInterceptWeight;
// Now aspatial features
for (int i = 0; i < remainingAspatialFeatures.size(); ++i)
{
featuresArray[nextIdx] = remainingAspatialFeatures.get(i);
weightsArray[nextIdx++] = remainingAspatialWeights.getQuick(i);
}
// And finally spatial features
for (int i = 0; i < remainingSpatialFeatures.size(); ++i)
{
featuresArray[nextIdx] = remainingSpatialFeatures.get(i);
weightsArray[nextIdx++] = remainingSpatialWeights.getQuick(i);
}
return new LogitModelNode(featuresArray, weightsArray);
}
// Find optimal splitting feature. As optimal splitting criterion, we try to
// get the lowest average (between our two children) sum of absolute weight values
// in remaining non-intercept features.
float lowestScore = Float.POSITIVE_INFINITY;
int bestIdx = -1;
boolean bestFeatureIsAspatial = true;
float sumAllAbsWeights = 0.f;
for (int i = 0; i < remainingAspatialWeights.size(); ++i)
{
sumAllAbsWeights += Math.abs(remainingAspatialWeights.getQuick(i));
}
for (int i = 0; i < remainingSpatialWeights.size(); ++i)
{
sumAllAbsWeights += Math.abs(remainingSpatialWeights.getQuick(i));
}
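		// Since neither branch absorbs any other feature's weight in this naive variant, every
		// candidate's score reduces to (sumAllAbsWeights - |w_f|), so the loops below simply pick
		// the remaining feature with the largest absolute weight (as described in the javadoc).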
for (int i = 0; i < remainingAspatialFeatures.size(); ++i)
{
final float absFeatureWeight = Math.abs(remainingAspatialWeights.getQuick(i));
float falseScore = sumAllAbsWeights - absFeatureWeight;
float trueScore = sumAllAbsWeights - absFeatureWeight;
final float splitScore = (falseScore + trueScore) / 2.f;
if (splitScore < lowestScore)
{
lowestScore = splitScore;
bestIdx = i;
}
}
for (int i = 0; i < remainingSpatialFeatures.size(); ++i)
{
final float absFeatureWeight = Math.abs(remainingSpatialWeights.getQuick(i));
float falseScore = sumAllAbsWeights - absFeatureWeight;
float trueScore = sumAllAbsWeights - absFeatureWeight;
final float splitScore = (falseScore + trueScore) / 2.f;
if (splitScore < lowestScore)
{
lowestScore = splitScore;
bestIdx = i;
bestFeatureIsAspatial = false;
}
}
final Feature splittingFeature;
if (bestFeatureIsAspatial)
splittingFeature = remainingAspatialFeatures.get(bestIdx);
else
splittingFeature = remainingSpatialFeatures.get(bestIdx);
// Create the node for case where splitting feature is true
final LogitTreeNode trueBranch;
{
final List<AspatialFeature> remainingAspatialsWhenTrue;
final TFloatArrayList remainingAspatialWeightsWhenTrue;
final List<SpatialFeature> remainingSpatialsWhenTrue;
final TFloatArrayList remainingSpatialWeightsWhenTrue;
float accumInterceptWhenTrue = accumInterceptWeight;
if (bestFeatureIsAspatial)
{
// Remove the aspatial feature that we split on
remainingAspatialsWhenTrue = new ArrayList<AspatialFeature>(remainingAspatialFeatures);
remainingAspatialWeightsWhenTrue = new TFloatArrayList(remainingAspatialWeights);
ListUtils.removeSwap(remainingAspatialsWhenTrue, bestIdx);
accumInterceptWhenTrue += remainingAspatialWeightsWhenTrue.getQuick(bestIdx);
ListUtils.removeSwap(remainingAspatialWeightsWhenTrue, bestIdx);
// Remove all spatial features when an aspatial feature is true
remainingSpatialsWhenTrue = new ArrayList<SpatialFeature>();
remainingSpatialWeightsWhenTrue = new TFloatArrayList();
}
else
{
// Remove all the aspatial features if a spatial feature is true
remainingAspatialsWhenTrue = new ArrayList<AspatialFeature>();
remainingAspatialWeightsWhenTrue = new TFloatArrayList();
// Remove all spatial features that are more general than our splitting feature + the splitting feature
remainingSpatialsWhenTrue = new ArrayList<SpatialFeature>(remainingSpatialFeatures);
remainingSpatialWeightsWhenTrue = new TFloatArrayList(remainingSpatialWeights);
for (int i = remainingSpatialsWhenTrue.size() - 1; i >= 0; --i)
{
if (i == bestIdx)
{
ListUtils.removeSwap(remainingSpatialsWhenTrue, i);
accumInterceptWhenTrue += remainingSpatialWeightsWhenTrue.getQuick(i);
ListUtils.removeSwap(remainingSpatialWeightsWhenTrue, i);
}
else
{
final SpatialFeature other = remainingSpatialsWhenTrue.get(i);
if (other.generalises((SpatialFeature)splittingFeature))
{
ListUtils.removeSwap(remainingSpatialsWhenTrue, i);
accumInterceptWhenTrue += remainingSpatialWeightsWhenTrue.getQuick(i);
ListUtils.removeSwap(remainingSpatialWeightsWhenTrue, i);
}
}
}
}
trueBranch =
buildNodeNaiveMaxAbs
(
remainingAspatialsWhenTrue,
remainingAspatialWeightsWhenTrue,
remainingSpatialsWhenTrue,
remainingSpatialWeightsWhenTrue,
accumInterceptWhenTrue,
allowedDepth - 1
);
}
// Create the node for case where splitting feature is false
final LogitTreeNode falseBranch;
{
final List<AspatialFeature> remainingAspatialsWhenFalse;
final TFloatArrayList remainingAspatialWeightsWhenFalse;
final List<SpatialFeature> remainingSpatialsWhenFalse;
final TFloatArrayList remainingSpatialWeightsWhenFalse;
float accumInterceptWhenFalse = accumInterceptWeight;
if (bestFeatureIsAspatial)
{
// Remove the aspatial feature that we split on
remainingAspatialsWhenFalse = new ArrayList<AspatialFeature>(remainingAspatialFeatures);
remainingAspatialWeightsWhenFalse = new TFloatArrayList(remainingAspatialWeights);
ListUtils.removeSwap(remainingAspatialsWhenFalse, bestIdx);
ListUtils.removeSwap(remainingAspatialWeightsWhenFalse, bestIdx);
// Keep all spatial features when an aspatial feature is false
remainingSpatialsWhenFalse = new ArrayList<SpatialFeature>(remainingSpatialFeatures);
remainingSpatialWeightsWhenFalse = new TFloatArrayList(remainingSpatialWeights);
}
else
{
// Keep all the aspatial features if a spatial feature is false
remainingAspatialsWhenFalse = new ArrayList<AspatialFeature>(remainingAspatialFeatures);
remainingAspatialWeightsWhenFalse = new TFloatArrayList(remainingAspatialWeights);
// Remove all spatial features that are generalised by our splitting feature + the splitting feature
remainingSpatialsWhenFalse = new ArrayList<SpatialFeature>(remainingSpatialFeatures);
remainingSpatialWeightsWhenFalse = new TFloatArrayList(remainingSpatialWeights);
for (int i = remainingSpatialsWhenFalse.size() - 1; i >= 0; --i)
{
if (i == bestIdx)
{
ListUtils.removeSwap(remainingSpatialsWhenFalse, i);
ListUtils.removeSwap(remainingSpatialWeightsWhenFalse, i);
}
else
{
final SpatialFeature other = remainingSpatialsWhenFalse.get(i);
if (((SpatialFeature)splittingFeature).generalises(other))
{
ListUtils.removeSwap(remainingSpatialsWhenFalse, i);
ListUtils.removeSwap(remainingSpatialWeightsWhenFalse, i);
}
}
}
}
falseBranch =
buildNodeNaiveMaxAbs
(
remainingAspatialsWhenFalse,
remainingAspatialWeightsWhenFalse,
remainingSpatialsWhenFalse,
remainingSpatialWeightsWhenFalse,
accumInterceptWhenFalse,
allowedDepth - 1
);
}
return new LogitDecisionNode(splittingFeature, trueBranch, falseBranch);
}
//-------------------------------------------------------------------------
}
| 24,286 | 34.300872 | 132 | java |
Ludii | Ludii-master/AI/src/decision_trees/logits/ExperienceLogitTreeLearner.java | package decision_trees.logits;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;
import features.Feature;
import features.FeatureVector;
import features.WeightVector;
import features.aspatial.InterceptFeature;
import features.feature_sets.BaseFeatureSet;
import function_approx.LinearFunction;
import gnu.trove.list.array.TFloatArrayList;
import gnu.trove.list.array.TIntArrayList;
import training.expert_iteration.ExItExperience;
import utils.data_structures.experience_buffers.ExperienceBuffer;
/**
* Class with methods for learning logit trees from experience.
*
* @author Dennis Soemers
*/
public class ExperienceLogitTreeLearner
{
//-------------------------------------------------------------------------
/**
* Builds an exact logit tree node for given feature set and experience buffer
* @param featureSet
* @param linFunc
* @param buffer
* @param maxDepth
* @param minSamplesPerLeaf
* @return Root node of the generated tree
*/
public static LogitTreeNode buildTree
(
final BaseFeatureSet featureSet,
final LinearFunction linFunc,
final ExperienceBuffer buffer,
final int maxDepth,
final int minSamplesPerLeaf
)
{
final WeightVector oracleWeightVector = linFunc.effectiveParams();
final ExItExperience[] samples = buffer.allExperience();
final List<FeatureVector> allFeatureVectors = new ArrayList<FeatureVector>();
final TFloatArrayList allTargetLogits = new TFloatArrayList();
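		// The linear function acts as an oracle: for every feature vector generated from the
		// experience samples, the regression target is the oracle's predicted logit (the dot
		// product of its effective weights with that feature vector).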
for (final ExItExperience sample : samples)
{
if (sample != null && sample.moves().size() > 1)
{
final FeatureVector[] featureVectors = sample.generateFeatureVectors(featureSet);
for (final FeatureVector featureVector : featureVectors)
{
allFeatureVectors.add(featureVector);
allTargetLogits.add(oracleWeightVector.dot(featureVector));
}
}
}
return buildNode
(
featureSet,
allFeatureVectors,
allTargetLogits,
new BitSet(), new BitSet(),
featureSet.getNumAspatialFeatures(), featureSet.getNumSpatialFeatures(),
maxDepth,
minSamplesPerLeaf
);
}
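	// Minimal usage sketch (hypothetical variable names; assumes a trained LinearFunction
	// "linFunc", its matching BaseFeatureSet "featureSet", and a filled ExperienceBuffer
	// "buffer" are already available; the depth and leaf-size values are illustrative only):
	//
	// final LogitTreeNode root = ExperienceLogitTreeLearner.buildTree(featureSet, linFunc, buffer, 4, 10);
	// final metadata.ai.features.trees.logits.LogitNode metadataRoot = root.toMetadataNode();
	//
	// The metadata node can later be turned back into a predictive tree with
	// LogitTreeNode.fromMetadataNode(metadataRoot, featureSet).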
//-------------------------------------------------------------------------
/**
* @param featureSet
* @param remainingFeatureVectors
* @param remainingTargetLogits
* @param alreadyPickedAspatials
* @param alreadyPickedSpatials
* @param numAspatialFeatures
* @param numSpatialFeatures
* @param allowedDepth
* @param minSamplesPerLeaf
* @return Newly built node for logit tree, for given data
*/
private static LogitTreeNode buildNode
(
final BaseFeatureSet featureSet,
final List<FeatureVector> remainingFeatureVectors,
final TFloatArrayList remainingTargetLogits,
final BitSet alreadyPickedAspatials,
final BitSet alreadyPickedSpatials,
final int numAspatialFeatures,
final int numSpatialFeatures,
final int allowedDepth,
final int minSamplesPerLeaf
)
{
if (minSamplesPerLeaf <= 0)
throw new IllegalArgumentException("minSamplesPerLeaf must be greater than 0");
if (remainingFeatureVectors.isEmpty())
{
return new LogitModelNode(new Feature[] {InterceptFeature.instance()}, new float[] {0.f});
}
if (allowedDepth == 0)
{
// Have to create leaf node here TODO could in theory use remaining features to compute a model again
final float meanLogit = remainingTargetLogits.sum() / remainingTargetLogits.size();
return new LogitModelNode(new Feature[] {InterceptFeature.instance()}, new float[] {meanLogit});
}
// For every aspatial and every spatial feature, if not already picked, compute mean logits for true and false branches
final double[] sumLogitsIfFalseAspatial = new double[numAspatialFeatures];
final int[] numFalseAspatial = new int[numAspatialFeatures];
final double[] sumLogitsIfTrueAspatial = new double[numAspatialFeatures];
final int[] numTrueAspatial = new int[numAspatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (alreadyPickedAspatials.get(i))
continue;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetLogit = remainingTargetLogits.getQuick(j);
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
{
sumLogitsIfTrueAspatial[i] += targetLogit;
++numTrueAspatial[i];
}
else
{
sumLogitsIfFalseAspatial[i] += targetLogit;
++numFalseAspatial[i];
}
}
}
final double[] sumLogitsIfFalseSpatial = new double[numSpatialFeatures];
final int[] numFalseSpatial = new int[numSpatialFeatures];
final double[] sumLogitsIfTrueSpatial = new double[numSpatialFeatures];
final int[] numTrueSpatial = new int[numSpatialFeatures];
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
final FeatureVector featureVector = remainingFeatureVectors.get(i);
final float targetLogit = remainingTargetLogits.getQuick(i);
final boolean[] active = new boolean[numSpatialFeatures];
final TIntArrayList sparseSpatials = featureVector.activeSpatialFeatureIndices();
for (int j = 0; j < sparseSpatials.size(); ++j)
{
active[sparseSpatials.getQuick(j)] = true;
}
for (int j = 0; j < active.length; ++j)
{
if (alreadyPickedSpatials.get(j))
continue;
if (active[j])
{
sumLogitsIfTrueSpatial[j] += targetLogit;
++numTrueSpatial[j];
}
else
{
sumLogitsIfFalseSpatial[j] += targetLogit;
++numFalseSpatial[j];
}
}
}
final double[] meanLogitsIfFalseAspatial = new double[numAspatialFeatures];
final double[] meanLogitsIfTrueAspatial = new double[numAspatialFeatures];
final double[] meanLogitsIfFalseSpatial = new double[numSpatialFeatures];
final double[] meanLogitsIfTrueSpatial = new double[numSpatialFeatures];
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] > 0)
meanLogitsIfFalseAspatial[i] = sumLogitsIfFalseAspatial[i] / numFalseAspatial[i];
if (numTrueAspatial[i] > 0)
meanLogitsIfTrueAspatial[i] = sumLogitsIfTrueAspatial[i] / numTrueAspatial[i];
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] > 0)
meanLogitsIfFalseSpatial[i] = sumLogitsIfFalseSpatial[i] / numFalseSpatial[i];
if (numTrueSpatial[i] > 0)
meanLogitsIfTrueSpatial[i] = sumLogitsIfTrueSpatial[i] / numTrueSpatial[i];
}
// Find feature that maximally reduces sum of squared errors
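		// For each candidate feature f, the split is scored by the sum of squared errors obtained
		// when every sample in a branch is predicted by that branch's mean target logit:
		//   SSE(f) = sum over samples with f active of (logit - meanTrue_f)^2
		//          + sum over samples with f inactive of (logit - meanFalse_f)^2
		// The candidate with the lowest SSE is chosen, subject to both branches containing at
		// least minSamplesPerLeaf samples.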
double minSumSquaredErrors = Double.POSITIVE_INFINITY;
double maxSumSquaredErrors = Double.NEGATIVE_INFINITY;
int bestIdx = -1;
boolean bestFeatureIsAspatial = true;
for (int i = 0; i < numAspatialFeatures; ++i)
{
if (numFalseAspatial[i] < minSamplesPerLeaf || numTrueAspatial[i] < minSamplesPerLeaf)
continue;
double sumSquaredErrors = 0.0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetLogit = remainingTargetLogits.getQuick(j);
final double error;
if (featureVector.aspatialFeatureValues().get(i) != 0.f)
error = targetLogit - meanLogitsIfTrueAspatial[i];
else
error = targetLogit - meanLogitsIfFalseAspatial[i];
sumSquaredErrors += (error * error);
}
if (sumSquaredErrors < minSumSquaredErrors)
{
minSumSquaredErrors = sumSquaredErrors;
bestIdx = i;
}
if (sumSquaredErrors > maxSumSquaredErrors)
{
maxSumSquaredErrors = sumSquaredErrors;
}
}
for (int i = 0; i < numSpatialFeatures; ++i)
{
if (numFalseSpatial[i] < minSamplesPerLeaf || numTrueSpatial[i] < minSamplesPerLeaf)
continue;
double sumSquaredErrors = 0.0;
for (int j = 0; j < remainingFeatureVectors.size(); ++j)
{
final FeatureVector featureVector = remainingFeatureVectors.get(j);
final float targetLogit = remainingTargetLogits.getQuick(j);
final double error;
if (featureVector.activeSpatialFeatureIndices().contains(i))
error = targetLogit - meanLogitsIfTrueSpatial[i];
else
error = targetLogit - meanLogitsIfFalseSpatial[i];
sumSquaredErrors += (error * error);
}
if (sumSquaredErrors < minSumSquaredErrors)
{
minSumSquaredErrors = sumSquaredErrors;
bestIdx = i;
bestFeatureIsAspatial = false;
}
if (sumSquaredErrors > maxSumSquaredErrors)
{
maxSumSquaredErrors = sumSquaredErrors;
}
}
if (bestIdx == -1 || minSumSquaredErrors == 0.0 || minSumSquaredErrors == maxSumSquaredErrors)
{
// No point in making any split at all, so just make leaf TODO could in theory use remaining features to compute a model again
final float meanLogit = remainingTargetLogits.sum() / remainingTargetLogits.size();
return new LogitModelNode(new Feature[] {InterceptFeature.instance()}, new float[] {meanLogit});
}
final Feature splittingFeature;
if (bestFeatureIsAspatial)
splittingFeature = featureSet.aspatialFeatures()[bestIdx];
else
splittingFeature = featureSet.spatialFeatures()[bestIdx];
final BitSet newAlreadyPickedAspatials;
final BitSet newAlreadyPickedSpatials;
if (bestFeatureIsAspatial)
{
newAlreadyPickedAspatials = (BitSet) alreadyPickedAspatials.clone();
newAlreadyPickedAspatials.set(bestIdx);
newAlreadyPickedSpatials = alreadyPickedSpatials;
}
else
{
newAlreadyPickedSpatials = (BitSet) alreadyPickedSpatials.clone();
newAlreadyPickedSpatials.set(bestIdx);
newAlreadyPickedAspatials = alreadyPickedAspatials;
}
// Split remaining data for the two branches
final List<FeatureVector> remainingFeatureVectorsTrue = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetLogitsTrue = new TFloatArrayList();
final List<FeatureVector> remainingFeatureVectorsFalse = new ArrayList<FeatureVector>();
final TFloatArrayList remainingTargetLogitsFalse = new TFloatArrayList();
if (bestFeatureIsAspatial)
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).aspatialFeatureValues().get(bestIdx) != 0.f)
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetLogitsTrue.add(remainingTargetLogits.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetLogitsFalse.add(remainingTargetLogits.getQuick(i));
}
}
}
else
{
for (int i = 0; i < remainingFeatureVectors.size(); ++i)
{
if (remainingFeatureVectors.get(i).activeSpatialFeatureIndices().contains(bestIdx))
{
remainingFeatureVectorsTrue.add(remainingFeatureVectors.get(i));
remainingTargetLogitsTrue.add(remainingTargetLogits.getQuick(i));
}
else
{
remainingFeatureVectorsFalse.add(remainingFeatureVectors.get(i));
remainingTargetLogitsFalse.add(remainingTargetLogits.getQuick(i));
}
}
}
// Create the node for case where splitting feature is true
final LogitTreeNode trueBranch;
{
trueBranch =
buildNode
(
featureSet,
remainingFeatureVectorsTrue,
remainingTargetLogitsTrue,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
// Create the node for case where splitting feature is false
final LogitTreeNode falseBranch;
{
falseBranch =
buildNode
(
featureSet,
remainingFeatureVectorsFalse,
remainingTargetLogitsFalse,
newAlreadyPickedAspatials,
newAlreadyPickedSpatials,
numAspatialFeatures,
numSpatialFeatures,
allowedDepth - 1,
minSamplesPerLeaf
);
}
return new LogitDecisionNode(splittingFeature, trueBranch, falseBranch);
}
//-------------------------------------------------------------------------
}
| 11,953 | 29.730077 | 130 | java |
Ludii | Ludii-master/AI/src/decision_trees/logits/LogitDecisionNode.java | package decision_trees.logits;
import features.Feature;
import features.FeatureVector;
import features.aspatial.AspatialFeature;
import metadata.ai.features.trees.logits.If;
import metadata.ai.features.trees.logits.LogitNode;
/**
* Decision node in a feature-based logit tree
*
* @author Dennis Soemers
*/
public class LogitDecisionNode extends LogitTreeNode
{
//-------------------------------------------------------------------------
/** The feature we want to evaluate (our condition) */
protected final Feature feature;
/** Node we should traverse to if feature is true */
protected final LogitTreeNode trueNode;
/** Node we should traverse to if feature is false */
protected final LogitTreeNode falseNode;
/** Index of the feature we look at in our feature set (may index into either aspatial or spatial features list) */
protected int featureIdx = -1;
//-------------------------------------------------------------------------
/**
* Constructor
* @param feature
* @param trueNode Node we should traverse to if feature is true
* @param falseNode Node we should traverse to if feature is false
*/
public LogitDecisionNode
(
final Feature feature,
final LogitTreeNode trueNode,
final LogitTreeNode falseNode
)
{
this.feature = feature;
this.trueNode = trueNode;
this.falseNode = falseNode;
}
/**
* Constructor
* @param feature
* @param trueNode Node we should traverse to if feature is true
* @param falseNode Node we should traverse to if feature is false
* @param featureIdx Index of the feature
*/
public LogitDecisionNode
(
final Feature feature,
final LogitTreeNode trueNode,
final LogitTreeNode falseNode,
final int featureIdx
)
{
this.feature = feature;
this.trueNode = trueNode;
this.falseNode = falseNode;
this.featureIdx = featureIdx;
}
//-------------------------------------------------------------------------
@Override
public float predict(final FeatureVector featureVector)
{
if (feature instanceof AspatialFeature)
{
if (featureVector.aspatialFeatureValues().get(featureIdx) != 0.f)
return trueNode.predict(featureVector);
else
return falseNode.predict(featureVector);
}
else
{
if (featureVector.activeSpatialFeatureIndices().contains(featureIdx))
return trueNode.predict(featureVector);
else
return falseNode.predict(featureVector);
}
}
//-------------------------------------------------------------------------
@Override
public LogitNode toMetadataNode()
{
return new If(feature.toString(), trueNode.toMetadataNode(), falseNode.toMetadataNode());
}
//-------------------------------------------------------------------------
}
| 2,715 | 25.115385 | 116 | java |
Ludii | Ludii-master/AI/src/decision_trees/logits/LogitModelNode.java | package decision_trees.logits;
import features.Feature;
import features.FeatureVector;
import features.aspatial.AspatialFeature;
import metadata.ai.features.trees.logits.Leaf;
import metadata.ai.features.trees.logits.LogitNode;
import metadata.ai.misc.Pair;
/**
* Leaf node in a feature-based logit tree, with a linear model.
*
* @author Dennis Soemers
*/
public class LogitModelNode extends LogitTreeNode
{
//-------------------------------------------------------------------------
/** Array of remaining features */
protected final Feature[] features;
/** Array of weights for the remaining features */
protected final float[] weights;
/** Array of feature indices */
protected final int[] featureIndices;
//-------------------------------------------------------------------------
/**
* Constructor
* @param features
* @param weights
*/
public LogitModelNode
(
final Feature[] features,
final float[] weights
)
{
this.features = features;
this.weights = weights;
featureIndices = null;
}
/**
* Constructor
* @param features
* @param weights
* @param featureIndices
*/
public LogitModelNode
(
final Feature[] features,
final float[] weights,
final int[] featureIndices
)
{
this.features = features;
this.weights = weights;
this.featureIndices = featureIndices;
}
//-------------------------------------------------------------------------
@Override
public float predict(final FeatureVector featureVector)
{
float dotProduct = 0.f;
for (int i = 0; i < features.length; ++i)
{
final Feature feature = features[i];
final int featureIdx = featureIndices[i];
if (feature instanceof AspatialFeature)
{
dotProduct += featureVector.aspatialFeatureValues().get(featureIdx) * weights[i];
}
else
{
if (featureVector.activeSpatialFeatureIndices().contains(featureIdx))
dotProduct += weights[i];
}
}
return dotProduct;
}
//-------------------------------------------------------------------------
@Override
public LogitNode toMetadataNode()
{
final Pair[] pairs = new Pair[features.length];
		for (int i = 0; i < pairs.length; ++i)
{
pairs[i] = new Pair(features[i].toString(), Float.valueOf(weights[i]));
}
return new Leaf(pairs);
}
//-------------------------------------------------------------------------
}
| 2,374 | 21.196262 | 85 | java |
Ludii | Ludii-master/AI/src/decision_trees/logits/LogitTreeNode.java | package decision_trees.logits;
import features.Feature;
import features.FeatureVector;
import features.feature_sets.BaseFeatureSet;
import metadata.ai.features.trees.logits.LogitNode;
/**
* Abstract class for a node in a feature-based regression tree
* that should output logits.
*
* @author Dennis Soemers
*/
public abstract class LogitTreeNode
{
//-------------------------------------------------------------------------
/**
* @param featureVector
* @return Predicted logit for given feature vector
*/
public abstract float predict(final FeatureVector featureVector);
//-------------------------------------------------------------------------
/**
* Convert to tree in metadata format.
* @return logit node.
*/
public abstract LogitNode toMetadataNode();
//-------------------------------------------------------------------------
/**
* Constructs a node (and hence, tree) from the given metadata node.
* @param metadataNode
* @param featureSet
* @return Constructed node
*/
public static LogitTreeNode fromMetadataNode(final LogitNode metadataNode, final BaseFeatureSet featureSet)
{
if (metadataNode instanceof metadata.ai.features.trees.logits.If)
{
final metadata.ai.features.trees.logits.If ifNode = (metadata.ai.features.trees.logits.If) metadataNode;
final LogitTreeNode thenBranch = fromMetadataNode(ifNode.thenNode(), featureSet);
final LogitTreeNode elseBranch = fromMetadataNode(ifNode.elseNode(), featureSet);
final String featureString = ifNode.featureString();
final int featureIdx = featureSet.findFeatureIndexForString(featureString);
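			// Note: findFeatureIndexForString appears to return an index that may refer to either
			// the aspatial or the spatial feature array; the string comparison below disambiguates
			// which array the index belongs to when it falls within the aspatial range.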
final Feature feature;
if (featureIdx < featureSet.aspatialFeatures().length)
{
if (featureSet.aspatialFeatures()[featureIdx].toString().equals(featureString))
feature = featureSet.aspatialFeatures()[featureIdx];
else
feature = featureSet.spatialFeatures()[featureIdx];
}
else
{
feature = featureSet.spatialFeatures()[featureIdx];
}
return new LogitDecisionNode(feature, thenBranch, elseBranch, featureIdx);
}
else
{
final metadata.ai.features.trees.logits.Leaf leafNode = (metadata.ai.features.trees.logits.Leaf) metadataNode;
final String[] featureStrings = leafNode.featureStrings();
final float[] weights = leafNode.weights();
final int[] featureIndices = new int[featureStrings.length];
final Feature[] features = new Feature[featureStrings.length];
for (int i = 0; i < features.length; ++i)
{
final String featureString = featureStrings[i];
final int featureIdx = featureSet.findFeatureIndexForString(featureString);
final Feature feature;
if (featureIdx < featureSet.aspatialFeatures().length)
{
if (featureSet.aspatialFeatures()[featureIdx].toString().equals(featureString))
feature = featureSet.aspatialFeatures()[featureIdx];
else
feature = featureSet.spatialFeatures()[featureIdx];
}
else
{
feature = featureSet.spatialFeatures()[featureIdx];
}
features[i] = feature;
featureIndices[i] = featureIdx;
}
return new LogitModelNode(features, weights, featureIndices);
}
}
//-------------------------------------------------------------------------
}
| 3,260 | 30.660194 | 113 | java |
Ludii | Ludii-master/AI/src/function_approx/BoostedLinearFunction.java | package function_approx;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import features.WeightVector;
import gnu.trove.list.array.TFloatArrayList;
import main.collections.FVector;
/**
* A linear function approximator that uses another linear function for boosting
* (the effective params of this approximator are the sum of the trainable
* params and the effective params of the boosting function).
*
* @author Dennis Soemers
*/
public class BoostedLinearFunction extends LinearFunction
{
//-------------------------------------------------------------------------
/** Function of which we use the effective params for boosting */
protected final LinearFunction booster;
//-------------------------------------------------------------------------
/**
* Constructor
*
* @param theta Trainable parameters vector
* @param booster Linear function of which we add the parameters to our trainable parameters
*/
public BoostedLinearFunction(final WeightVector theta, final LinearFunction booster)
{
super(theta);
this.booster = booster;
}
//-------------------------------------------------------------------------
/**
* @return Vector of effective parameters, used for making predictions. For this
* class, the trainable params plus the effective params of the booster.
*/
@Override
public WeightVector effectiveParams()
{
final FVector params = booster.effectiveParams().allWeights().copy();
params.add(trainableParams().allWeights());
return new WeightVector(params);
}
//-------------------------------------------------------------------------
/**
* Writes Linear function to the given filepath.
*/
@Override
public void writeToFile(final String filepath, final String[] featureSetFiles)
{
try (final PrintWriter writer = new PrintWriter(filepath, "UTF-8"))
{
for (int i = 0; i < theta.allWeights().dim(); ++i)
{
writer.println(theta.allWeights().get(i));
}
for (final String fsf : featureSetFiles)
{
writer.println("FeatureSet=" + new File(fsf).getName());
}
writer.println("Effective Params:");
final FVector effectiveParams = effectiveParams().allWeights();
for (int i = 0; i < effectiveParams.dim(); ++i)
{
writer.println(effectiveParams.get(i));
}
}
catch (final IOException e)
{
e.printStackTrace();
}
}
/**
* @param filepath
* @param booster
* @return Reads linear function from the given filepath.
*/
public static BoostedLinearFunction boostedFromFile(final String filepath, final LinearFunction booster)
{
try (BufferedReader reader = new BufferedReader(
new InputStreamReader(new FileInputStream(filepath), "UTF-8")))
{
final TFloatArrayList readFloats = new TFloatArrayList();
String featureSetFile = null;
String line;
while (true)
{
line = reader.readLine();
if (line == null)
{
break;
}
if (line.startsWith("FeatureSet="))
{
featureSetFile = line.substring("FeatureSet=".length());
}
else if (line.equals("Effective Params:"))
{
break;
}
else
{
readFloats.add(Float.parseFloat(line));
}
}
float[] floats = new float[readFloats.size()];
for (int i = 0; i < floats.length; ++i)
{
floats[i] = readFloats.getQuick(i);
}
LinearFunction boosterFunc = booster;
if (boosterFunc == null)
{
// Don't have a booster, so create a dummy linear function as booster
// such that the total effective params remain the same
final TFloatArrayList effectiveParams = new TFloatArrayList();
// we're first expecting a line saying "Effective Params:"
if (!line.equals("Effective Params:"))
{
System.err.println("Error in BoostedLinearFunction::boostedFromFile file! "
+ "Expected line: \"Effective Params:\"");
}
line = reader.readLine();
while (line != null)
{
effectiveParams.add(Float.parseFloat(line));
line = reader.readLine();
}
float[] boosterFloats = new float[effectiveParams.size()];
for (int i = 0; i < boosterFloats.length; ++i)
{
boosterFloats[i] = effectiveParams.getQuick(i) - floats[i];
}
boosterFunc = new LinearFunction(new WeightVector(FVector.wrap(boosterFloats)));
}
final BoostedLinearFunction func = new BoostedLinearFunction(new WeightVector(FVector.wrap(floats)), boosterFunc);
func.setFeatureSetFile(featureSetFile);
return func;
}
catch (final IOException e)
{
e.printStackTrace();
}
return null;
}
//-------------------------------------------------------------------------
}
| 4,803 | 25.251366 | 117 | java |
Ludii | Ludii-master/AI/src/function_approx/LinearFunction.java | package function_approx;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import features.FeatureVector;
import features.WeightVector;
import gnu.trove.list.array.TFloatArrayList;
import main.collections.FVector;
/**
* A linear function approximator
*
* @author Dennis Soemers
*/
public class LinearFunction
{
//-------------------------------------------------------------------------
/** Our vector of parameters / weights */
protected WeightVector theta;
/** Filepath for feature set corresponding to our parameters */
protected String featureSetFile = null;
//-------------------------------------------------------------------------
/**
* Constructor
*
* @param theta
*/
public LinearFunction(final WeightVector theta)
{
this.theta = theta;
}
//-------------------------------------------------------------------------
/**
* @param featureVector
* @return Predicted value for a given feature vector
*/
public float predict(final FeatureVector featureVector)
{
return effectiveParams().dot(featureVector);
}
/**
* @return Vector of effective parameters, used for making predictions. For this
* class, a reference to theta.
*/
public WeightVector effectiveParams()
{
return theta;
}
/**
* @return Reference to parameters vector that we can train. For this class,
* a reference to theta.
*/
public WeightVector trainableParams()
{
return theta;
}
//-------------------------------------------------------------------------
/**
* Replaces the linear function's param vector theta
* @param newTheta
*/
public void setTheta(final WeightVector newTheta)
{
theta = newTheta;
}
//-------------------------------------------------------------------------
/**
* @return Filename for corresponding Feature Set
*/
public String featureSetFile()
{
return featureSetFile;
}
/**
* Sets the filename for the corresponding Feature Set
* @param featureSetFile
*/
public void setFeatureSetFile(final String featureSetFile)
{
this.featureSetFile = featureSetFile;
}
//-------------------------------------------------------------------------
/**
* Writes linear function to the given filepath
* @param filepath
* @param featureSetFiles
*/
public void writeToFile(final String filepath, final String[] featureSetFiles)
{
try (final PrintWriter writer = new PrintWriter(filepath, "UTF-8"))
{
for (int i = 0; i < theta.allWeights().dim(); ++i)
{
writer.println(theta.allWeights().get(i));
}
for (final String fsf : featureSetFiles)
{
writer.println("FeatureSet=" + new File(fsf).getName());
}
}
catch (final IOException e)
{
e.printStackTrace();
}
}
/**
* @param filepath
* @return Reads linear function from the given filepath
*/
public static LinearFunction fromFile(final String filepath)
{
try
(
final BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(filepath), "UTF-8"))
)
{
final TFloatArrayList readFloats = new TFloatArrayList();
String featureSetFile = null;
while (true)
{
final String line = reader.readLine();
if (line == null)
break;
if (line.startsWith("FeatureSet="))
featureSetFile = line.substring("FeatureSet=".length());
else
readFloats.add(Float.parseFloat(line));
}
float[] floats = new float[readFloats.size()];
for (int i = 0; i < floats.length; ++i)
{
floats[i] = readFloats.getQuick(i);
}
final LinearFunction func = new LinearFunction(new WeightVector(FVector.wrap(floats)));
func.setFeatureSetFile(featureSetFile);
return func;
}
catch (final Exception e)
{
System.err.println("exception while trying to load from filepath: " + filepath);
e.printStackTrace();
}
return null;
}
//-------------------------------------------------------------------------
}
| 4,083 | 21.815642 | 114 | java |
Ludii | Ludii-master/AI/src/optimisers/AMSGrad.java | package optimisers;
import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import main.collections.FVector;
/**
* AMSGrad optimizer, with the original bias corrections from Adam
* included again.
*
* @author Dennis Soemers
*/
public class AMSGrad extends Optimiser
{
//-------------------------------------------------------------------------
/** */
private static final long serialVersionUID = 1L;
//-------------------------------------------------------------------------
/** beta_1 constant */
protected final float beta1;
/** beta_2 constant */
protected final float beta2;
/** Small constant added to denominator */
protected final float epsilon;
/** Moving average of gradients */
private FVector movingAvgGradients = null;
/** Moving average of squared gradients */
private FVector movingAvgSquaredGradients = null;
/**
* Vector of maximum values encountered for moving averages of
* squared gradients
*/
private FVector maxMovingAvgSquaredGradients = null;
//-------------------------------------------------------------------------
/**
* Constructor
*
* @param baseStepSize
*/
public AMSGrad(final float baseStepSize)
{
super(baseStepSize);
this.beta1 = 0.9f;
this.beta2 = 0.999f;
this.epsilon = 1.E-8f;
}
/**
* Constructor
*
* @param baseStepSize
* @param beta1
* @param beta2
* @param epsilon
*/
public AMSGrad
(
final float baseStepSize,
final float beta1,
final float beta2,
final float epsilon
)
{
super(baseStepSize);
this.beta1 = beta1;
this.beta2 = beta2;
this.epsilon = epsilon;
}
//-------------------------------------------------------------------------
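	// Sketch of the update rule implemented below, written in gradient-ascent form since we
	// maximise. With g_t the incoming gradients and all operations element-wise:
	//   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t                   (moving average of gradients)
	//   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2                 (moving average of squared gradients)
	//   vhat_t = max(vhat_{t-1}, v_t)                               (AMSGrad max term)
	//   theta_t = theta_{t-1} + baseStepSize * (m_t / (1 - beta1)) / (sqrt(vhat_t / (1 - beta2)) + epsilon)
	// Note that the bias corrections use the constant factors (1 - beta1) and (1 - beta2) rather
	// than the time-dependent (1 - beta1^t) and (1 - beta2^t) terms of the original Adam paper.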
@Override
	public void maximiseObjective
	(
		final FVector params,
		final FVector gradients
	)
	{
if (movingAvgGradients == null)
{
// need to initialize vectors for moving averages
movingAvgGradients = new FVector(gradients.dim());
movingAvgSquaredGradients = new FVector(gradients.dim());
maxMovingAvgSquaredGradients = new FVector(gradients.dim());
}
else
{
// may have to grow moving average vectors if feature set grew
while (movingAvgGradients.dim() < gradients.dim())
{
movingAvgGradients = movingAvgGradients.append(0.f);
movingAvgSquaredGradients =
movingAvgSquaredGradients.append(0.f);
maxMovingAvgSquaredGradients =
maxMovingAvgSquaredGradients.append(0.f);
}
}
// update moving averages
movingAvgGradients.mult(beta1);
movingAvgGradients.addScaled(gradients, (1.f - beta1));
final FVector gradientsSquared = gradients.copy();
gradientsSquared.hadamardProduct(gradientsSquared);
movingAvgSquaredGradients.mult(beta2);
movingAvgSquaredGradients.addScaled(gradientsSquared, (1.f - beta2));
maxMovingAvgSquaredGradients = FVector.elementwiseMax(
maxMovingAvgSquaredGradients,
movingAvgSquaredGradients);
// compute update
final FVector velocity = movingAvgGradients.copy();
// division by 1 - beta1 is bias correction from Adam
velocity.mult(baseStepSize / (1.f - beta1));
final FVector denominator = maxMovingAvgSquaredGradients.copy();
// another bias correction from Adam
denominator.div(1.f - beta2);
denominator.sqrt();
denominator.add(epsilon);
velocity.elementwiseDivision(denominator);
params.add(velocity);
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return Constructs an AMSGrad object from instructions in the
* given array of lines
*/
public static AMSGrad fromLines(final String[] lines)
{
float baseStepSize = 3E-4f;
float beta1 = 0.9f;
float beta2 = 0.999f;
float epsilon = 1.E-8f;
for (String line : lines)
{
final String[] lineParts = line.split(",");
//-----------------------------------------------------------------
// main parts
//-----------------------------------------------------------------
if (lineParts[0].toLowerCase().startsWith("basestepsize="))
{
baseStepSize = Float.parseFloat(
lineParts[0].substring("basestepsize=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("beta1="))
{
beta1 = Float.parseFloat(
lineParts[0].substring("beta1=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("beta2="))
{
beta2 = Float.parseFloat(
lineParts[0].substring("beta2=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("epsilon="))
{
epsilon = Float.parseFloat(
lineParts[0].substring("epsilon=".length()));
}
}
return new AMSGrad(baseStepSize, beta1, beta2, epsilon);
}
//-------------------------------------------------------------------------
@Override
public void writeToFile(final String filepath)
{
try
(
final ObjectOutputStream out =
new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(filepath)))
)
{
out.writeObject(this);
out.flush();
out.close();
}
catch (final IOException e)
{
e.printStackTrace();
}
}
//-------------------------------------------------------------------------
}
| 5,224 | 24.241546 | 84 | java |
Ludii | Ludii-master/AI/src/optimisers/DeepmindRMSProp.java | package optimisers;
import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import main.collections.FVector;
/**
* A variant of RMSProp that, as far as we're able to tell, DeepMind tends
* to use more often than standard RMSProp (for example in the original
* DQN Lua code).
*
* The primary differences in comparison to regular RMSProp are:
* 1) Usage of plain (not Nesterov) momentum
* 2) Centering by subtracting moving average of gradients in denominator.
* This means that gradients are normalized by the estimated variance of
* gradient, rather than the uncentered second moment (according to comments
* in TensorFlow implementation).
*
* This implementation specifically follows Equations (38) - (41) from
* https://arxiv.org/abs/1308.0850, which seems to be one of the only
* (if not the only) published sources for this particular variant of
* RMSProp.
*
* The TensorFlow implementation of RMSProp appears to be identical to this,
* when using momentum > 0.0 and centered = True.
*
* @author Dennis Soemers
*/
public class DeepmindRMSProp extends Optimiser
{
//-------------------------------------------------------------------------
/** */
private static final long serialVersionUID = 1L;
//-------------------------------------------------------------------------
/**
* Momentum term.
* "Velocity" of previous update is scaled by this value and added to
* subsequent update.
*/
protected final float momentum;
/**
* Decay factor used in updates of moving averages of (squared) gradients.
*/
protected final float decay;
/** Small constant added to denominator */
protected final float epsilon;
/**
* Last "velocity" vector. Used for momentum.
*/
private FVector lastVelocity = null;
/** Moving average of gradients */
private FVector movingAvgGradients = null;
/** Moving average of squared gradients */
private FVector movingAvgSquaredGradients = null;
//-------------------------------------------------------------------------
/**
* Constructor
*/
public DeepmindRMSProp()
{
//super(0.005f);
super(0.05f);
this.momentum = 0.9f;
//this.momentum = 0.f;
this.decay = 0.9f;
this.epsilon = 1.E-8f;
}
/**
* Constructor
*
* @param baseStepSize
*/
public DeepmindRMSProp(final float baseStepSize)
{
super(baseStepSize);
this.momentum = 0.9f;
//this.momentum = 0.f;
this.decay = 0.9f;
this.epsilon = 1.E-8f;
}
/**
* Constructor
*
* @param baseStepSize
* @param momentum
* @param decay
* @param epsilon
*/
public DeepmindRMSProp
(
final float baseStepSize,
final float momentum,
final float decay,
final float epsilon
)
{
super(baseStepSize);
this.momentum = momentum;
this.decay = decay;
this.epsilon = epsilon;
}
//-------------------------------------------------------------------------
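	// Sketch of the centered RMSProp update implemented below (cf. Equations (38)-(41) in the
	// paper referenced above), written in gradient-ascent form since we maximise. With g_t the
	// incoming gradients and all operations element-wise:
	//   n_t = decay * n_{t-1} + (1 - decay) * g_t^2                 (moving average of squared gradients)
	//   gbar_t = decay * gbar_{t-1} + (1 - decay) * g_t             (moving average of gradients)
	//   delta_t = momentum * delta_{t-1} + (baseStepSize / dim) * g_t / sqrt(n_t - gbar_t^2 + epsilon)
	//   theta_t = theta_{t-1} + delta_t
	// Note that the base step size is divided by the dimensionality of the gradient vector, and
	// that epsilon is added inside (before) the square root.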
@Override
public void maximiseObjective
(
final FVector params,
final FVector gradients
)
{
final FVector velocity = gradients.copy();
velocity.mult(baseStepSize / velocity.dim());
if (movingAvgGradients == null)
{
// need to initialize vectors for moving averages
movingAvgGradients = new FVector(gradients.dim());
movingAvgSquaredGradients = new FVector(gradients.dim());
}
else
{
// may have to grow moving average vectors if feature set grew
while (movingAvgGradients.dim() < gradients.dim())
{
movingAvgGradients = movingAvgGradients.append(0.f);
movingAvgSquaredGradients = movingAvgSquaredGradients.append(0.f);
}
}
// update moving averages
movingAvgGradients.mult(decay);
movingAvgGradients.addScaled(gradients, (1.f - decay));
final FVector gradientsSquared = gradients.copy();
gradientsSquared.hadamardProduct(gradientsSquared);
movingAvgSquaredGradients.mult(decay);
movingAvgSquaredGradients.addScaled(gradientsSquared, (1.f - decay));
// use them to divide the new velocity
final FVector denominator = movingAvgSquaredGradients.copy();
final FVector temp = movingAvgGradients.copy();
temp.hadamardProduct(temp);
denominator.subtract(temp);
denominator.add(epsilon);
denominator.sqrt();
velocity.elementwiseDivision(denominator);
// add momentum
if (momentum > 0.f && lastVelocity != null)
{
while (lastVelocity.dim() < velocity.dim())
{
// feature set has grown, so also need to grow the lastVelocity vector
lastVelocity = lastVelocity.append(0.f);
}
velocity.addScaled(lastVelocity, momentum);
}
params.add(velocity);
lastVelocity = velocity;
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return Constructs an RMSProp object from instructions in the
* given array of lines
*/
public static DeepmindRMSProp fromLines(final String[] lines)
{
float baseStepSize = 0.005f;
float momentum = 0.9f;
float decay = 0.9f;
float epsilon = 1.E-8f;
for (String line : lines)
{
final String[] lineParts = line.split(",");
//-----------------------------------------------------------------
// main parts
//-----------------------------------------------------------------
if (lineParts[0].toLowerCase().startsWith("basestepsize="))
{
baseStepSize = Float.parseFloat(
lineParts[0].substring("basestepsize=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("momentum="))
{
momentum = Float.parseFloat(
lineParts[0].substring("momentum=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("decay="))
{
decay = Float.parseFloat(
lineParts[0].substring("decay=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("epsilon="))
{
epsilon = Float.parseFloat(
lineParts[0].substring("epsilon=".length()));
}
}
return new DeepmindRMSProp(baseStepSize, momentum, decay, epsilon);
}
//-------------------------------------------------------------------------
@Override
public void writeToFile(final String filepath)
{
try
(
final ObjectOutputStream out =
new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(filepath)))
)
{
out.writeObject(this);
out.flush();
out.close();
}
catch (final IOException e)
{
e.printStackTrace();
}
}
//-------------------------------------------------------------------------
}
| 6,482 | 24.828685 | 84 | java |
Ludii | Ludii-master/AI/src/optimisers/Optimiser.java | package optimisers;
import java.io.Serializable;
import main.collections.FVector;
/**
* Base class for optimizers. All optimizers are pretty much assumed to be
* variants of Mini-Batch Gradient Descent.
*
* @author Dennis Soemers
*/
public abstract class Optimiser implements Serializable
{
//-------------------------------------------------------------------------
/** */
private static final long serialVersionUID = 1L;
//-------------------------------------------------------------------------
/** Base step-size (or learning rate) to use */
protected final float baseStepSize;
//-------------------------------------------------------------------------
/**
* Constructor
* @param baseStepSize
*/
public Optimiser(final float baseStepSize)
{
this.baseStepSize = baseStepSize;
}
//-------------------------------------------------------------------------
/**
* Should be implemented to adjust the given vector of parameters in an
* attempt to maximise an objective function. The objective function is
* implied by a vector of (estimates of) gradients of that objective
* function with respect to the trainable parameters.
*
* @param params
* Parameters to train
* @param gradients
* Vector of (estimates of) gradients of objective with respect to params.
*/
public abstract void maximiseObjective(final FVector params, final FVector gradients);
/**
* Calls maximiseObjective() with negated gradients, in order to minimize
* the objective.
*
* @param params
* @param gradients
*/
public final void minimiseObjective(final FVector params, final FVector gradients)
{
final FVector negatedGrads = gradients.copy();
negatedGrads.mult(-1.f);
maximiseObjective(params, negatedGrads);
}
//-------------------------------------------------------------------------
/**
* Writes this optimiser's internal state to a binary file
* @param filepath
*/
public abstract void writeToFile(final String filepath);
//-------------------------------------------------------------------------
}
| 2,091 | 26.168831 | 87 | java |
Ludii | Ludii-master/AI/src/optimisers/OptimiserFactory.java | package optimisers;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
/**
* Can create Optimizers based on strings / files
*
* @author Dennis Soemers
*/
public class OptimiserFactory
{
//-------------------------------------------------------------------------
/**
* Constructor should not be used.
*/
private OptimiserFactory()
{
// not intended to be used
}
//-------------------------------------------------------------------------
/**
* @param string String representation of optimizer,
* or filename from which to load optimizer
*
	 * @return Created optimiser
*/
public static Optimiser createOptimiser(final String string)
{
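		// Accepted inputs (values below are illustrative only):
		//   - a plain name: "SGD", "RMSProp" or "AMSGrad" (case-insensitive), giving default settings;
		//   - a filename or resource whose lines start with "optimiser=...", followed by options;
		//   - the same options passed directly as a semicolon-separated string, e.g.
		//     "optimiser=SGD;basestepsize=0.05;momentum=0.9".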
if (string.equalsIgnoreCase("SGD"))
{
return new SGD(0.05f);
}
else if (string.equalsIgnoreCase("RMSProp"))
{
return new DeepmindRMSProp();
}
else if (string.equalsIgnoreCase("AMSGrad"))
{
// the Karpathy constant:
// https://twitter.com/karpathy/status/801621764144971776
return new AMSGrad(3E-4f);
}
// try to interpret the given string as a resource or some other
// kind of file
final URL optimiserURL = OptimiserFactory.class.getResource(string);
File optimiserFile = null;
if (optimiserURL != null)
{
optimiserFile = new File(optimiserURL.getFile());
}
else
{
optimiserFile = new File(string);
}
String[] lines = new String[0];
if (optimiserFile.exists())
{
try (BufferedReader reader = new BufferedReader(
new FileReader(optimiserFile)))
{
final List<String> linesList = new ArrayList<String>();
String line = reader.readLine();
while (line != null)
{
linesList.add(line);
}
lines = linesList.toArray(lines);
}
catch (final IOException e)
{
e.printStackTrace();
}
}
else
{
// assume semicolon-separated lines directly passed as
// command line arg
lines = string.split(";");
}
final String firstLine = lines[0];
if (firstLine.startsWith("optimiser="))
{
final String optimiserName =
firstLine.substring("optimiser=".length());
if (optimiserName.equalsIgnoreCase("SGD"))
{
				// Plain stochastic gradient descent (optionally with momentum)
return SGD.fromLines(lines);
}
else if (optimiserName.equalsIgnoreCase("RMSProp"))
{
return DeepmindRMSProp.fromLines(lines);
}
else if (optimiserName.equalsIgnoreCase("AMSGrad"))
{
return AMSGrad.fromLines(lines);
}
else
{
System.err.println("Unknown optimizer name: " + optimiserName);
}
}
else
{
System.err.println(
"Expecting Optimizer file to start with \"optimiser=\", "
+ "but it starts with " + firstLine);
}
System.err.println(String.format(
"Warning: cannot convert string \"%s\" to Optimiser; "
+ "defaulting to vanilla SGD.",
string));
return new SGD(0.05f);
}
//-------------------------------------------------------------------------
}
| 3,098 | 21.294964 | 76 | java |
Ludii | Ludii-master/AI/src/optimisers/SGD.java | package optimisers;
import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import main.collections.FVector;
/**
* A standard Stochastic Gradient Descent optimiser, with optional support
* for a simple momentum term.
*
* @author Dennis Soemers
*/
public class SGD extends Optimiser
{
//-------------------------------------------------------------------------
/** */
private static final long serialVersionUID = 1L;
//-------------------------------------------------------------------------
/**
* Momentum term.
* "Velocity" of previous update is scaled by this value and added to
* subsequent update.
*/
protected final float momentum;
/**
* Last "velocity" vector. Used for momentum.
*/
private FVector lastVelocity = null;
//-------------------------------------------------------------------------
/**
* Constructor
*
* @param baseStepSize
*/
public SGD(final float baseStepSize)
{
super(baseStepSize);
this.momentum = 0.f;
}
/**
* Constructor with momentum
*
* @param baseStepSize
* @param momentum
*/
public SGD(final float baseStepSize, final float momentum)
{
super(baseStepSize);
this.momentum = momentum;
}
//-------------------------------------------------------------------------
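	// Sketch of the update implemented below, written in gradient-ascent form since we maximise:
	//   delta_t = baseStepSize * g_t + momentum * delta_{t-1}
	//   theta_t = theta_{t-1} + delta_t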
@Override
	public void maximiseObjective
	(
		final FVector params,
		final FVector gradients
	)
	{
final FVector velocity = gradients.copy();
velocity.mult(baseStepSize);
if (momentum > 0.f && lastVelocity != null)
{
while (lastVelocity.dim() < velocity.dim())
{
// feature set has grown, so also need to grow the lastVelocity
// vector
lastVelocity = lastVelocity.append(0.f);
}
velocity.addScaled(lastVelocity, momentum);
}
params.add(velocity);
lastVelocity = velocity;
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return Constructs an SGD object from instructions in the
* given array of lines
*/
public static SGD fromLines(final String[] lines)
{
float baseStepSize = 0.05f;
float momentum = 0.f;
for (String line : lines)
{
final String[] lineParts = line.split(",");
//-----------------------------------------------------------------
// main parts
//-----------------------------------------------------------------
if (lineParts[0].toLowerCase().startsWith("basestepsize="))
{
baseStepSize = Float.parseFloat(
lineParts[0].substring("basestepsize=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("momentum="))
{
momentum = Float.parseFloat(
lineParts[0].substring("momentum=".length()));
}
}
return new SGD(baseStepSize, momentum);
}
//-------------------------------------------------------------------------
@Override
public void writeToFile(final String filepath)
{
try
(
final ObjectOutputStream out =
new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(filepath)))
)
{
out.writeObject(this);
out.flush();
out.close();
}
catch (final IOException e)
{
e.printStackTrace();
}
}
//-------------------------------------------------------------------------
}
| 3,308 | 21.510204 | 84 | java |
Ludii | Ludii-master/AI/src/playout_move_selectors/DecisionTreeMoveSelector.java | package playout_move_selectors;
import decision_trees.classifiers.DecisionTreeNode;
import features.FeatureVector;
import features.feature_sets.BaseFeatureSet;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
/**
* PlayoutMoveSelector for playouts which uses a distribution over actions
* computed by move-classification feature trees.
*
* @author Dennis Soemers
*/
public class DecisionTreeMoveSelector extends PlayoutMoveSelector
{
//-------------------------------------------------------------------------
/** Feature sets (one per player, or just a shared one at index 0) */
protected final BaseFeatureSet[] featureSets;
/** Classification tree root nodes (one per player, or just a shared one at index 0) */
protected final DecisionTreeNode[] rootNodes;
/** Do we want to play greedily? */
protected final boolean greedy;
//-------------------------------------------------------------------------
/**
* Constructor
* @param featureSets Feature sets (one per player, or just a shared one at index 0)
* @param rootNodes Classification tree root nodes (one per player, or just a shared one at index 0)
* @param greedy Do we want to play greedily?
*/
public DecisionTreeMoveSelector
(
final BaseFeatureSet[] featureSets,
final DecisionTreeNode[] rootNodes,
final boolean greedy
)
{
this.featureSets = featureSets;
this.rootNodes = rootNodes;
this.greedy = greedy;
}
//-------------------------------------------------------------------------
@Override
public Move selectMove
(
final Context context,
final FastArrayList<Move> maybeLegalMoves,
final int p,
final IsMoveReallyLegal isMoveReallyLegal
)
{
final BaseFeatureSet featureSet;
final DecisionTreeNode rootNode;
if (featureSets.length == 1)
{
featureSet = featureSets[0];
rootNode = rootNodes[0];
}
else
{
featureSet = featureSets[p];
rootNode = rootNodes[p];
}
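// The classification tree predicts one value per candidate move; these are
// treated as unnormalised probabilities and normalised into a distribution.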
final FeatureVector[] featureVectors = featureSet.computeFeatureVectors(context, maybeLegalMoves, false);
final float[] unnormalisedProbs = new float[featureVectors.length];
for (int i = 0; i < featureVectors.length; ++i)
{
unnormalisedProbs[i] = rootNode.predict(featureVectors[i]);
}
final FVector distribution = FVector.wrap(unnormalisedProbs);
distribution.normalise();
int numLegalMoves = maybeLegalMoves.size();
while (numLegalMoves > 0)
{
--numLegalMoves; // We're trying a move; if this one fails, it's actually not legal
final int n = (greedy) ? distribution.argMaxRand() : distribution.sampleFromDistribution();
final Move move = maybeLegalMoves.get(n);
if (isMoveReallyLegal.checkMove(move))
return move; // Only return this move if it's really legal
else
distribution.updateSoftmaxInvalidate(n); // Incrementally update the softmax, move n is invalid
}
// No legal moves?
return null;
}
//-------------------------------------------------------------------------
}
| 3,099 | 27.181818 | 107 | java |
Ludii | Ludii-master/AI/src/playout_move_selectors/EpsilonGreedyWrapper.java | package playout_move_selectors;
import java.util.concurrent.ThreadLocalRandom;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
/**
* Epsilon-greedy wrapper around a Playout Move Selector
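 * (with probability epsilon a uniform-random move is requested, otherwise the
 * wrapped selector is used).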
*
* @author Dennis Soemers
*/
public class EpsilonGreedyWrapper extends PlayoutMoveSelector
{
//-------------------------------------------------------------------------
/** The wrapped playout move selector */
protected final PlayoutMoveSelector wrapped;
/** Probability of picking uniformly at random */
protected final double epsilon;
//-------------------------------------------------------------------------
/**
* Constructor
* @param wrapped
* @param epsilon
*/
public EpsilonGreedyWrapper(final PlayoutMoveSelector wrapped, final double epsilon)
{
this.wrapped = wrapped;
this.epsilon = epsilon;
}
//-------------------------------------------------------------------------
@Override
public Move selectMove
(
final Context context,
final FastArrayList<Move> maybeLegalMoves,
final int p,
final IsMoveReallyLegal isMoveReallyLegal
)
{
return wrapped.selectMove(context, maybeLegalMoves, p, isMoveReallyLegal);
}
@Override
public boolean wantsPlayUniformRandomMove()
{
return ThreadLocalRandom.current().nextDouble() < epsilon;
}
//-------------------------------------------------------------------------
}
| 1,462 | 22.596774 | 85 | java |
Ludii | Ludii-master/AI/src/playout_move_selectors/FeaturesSoftmaxMoveSelector.java | package playout_move_selectors;
import features.FeatureVector;
import features.WeightVector;
import features.feature_sets.BaseFeatureSet;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
/**
* PlayoutMoveSelector for playouts which uses a softmax over actions with logits
* computed by features.
*
* @author Dennis Soemers
*/
public class FeaturesSoftmaxMoveSelector extends PlayoutMoveSelector // TODO also a greedy version?
{
//-------------------------------------------------------------------------
/** Feature sets (one per player, or just a shared one at index 0) */
protected final BaseFeatureSet[] featureSets;
/** Weight vectors (one per player, or just a shared one at index 0) */
protected final WeightVector[] weights;
/** Do we want to use thresholding to ignore low-weight features? */
protected final boolean thresholded;
//-------------------------------------------------------------------------
/**
* Constructor
* @param featureSets Feature sets (one per player, or just a shared one at index 0)
* @param weights Weight vectors (one per player, or just a shared one at index 0)
* @param thresholded Do we want to use thresholding to ignore low-weight features?
*/
public FeaturesSoftmaxMoveSelector
(
final BaseFeatureSet[] featureSets,
final WeightVector[] weights,
final boolean thresholded
)
{
this.featureSets = featureSets;
this.weights = weights;
this.thresholded = thresholded;
}
//-------------------------------------------------------------------------
@Override
public Move selectMove
(
final Context context,
final FastArrayList<Move> maybeLegalMoves,
final int p,
final IsMoveReallyLegal isMoveReallyLegal
)
{
final BaseFeatureSet featureSet;
final WeightVector weightVector;
if (featureSets.length == 1)
{
featureSet = featureSets[0];
weightVector = weights[0];
}
else
{
featureSet = featureSets[p];
weightVector = weights[p];
}
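// Logit per candidate move = dot product of the move's feature vector with
// the weight vector; a softmax over these logits gives the sampling distribution.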
final FeatureVector[] featureVectors = featureSet.computeFeatureVectors(context, maybeLegalMoves, thresholded);
final float[] logits = new float[featureVectors.length];
for (int i = 0; i < featureVectors.length; ++i)
{
logits[i] = weightVector.dot(featureVectors[i]);
}
final FVector distribution = FVector.wrap(logits);
distribution.softmax();
int numLegalMoves = maybeLegalMoves.size();
while (numLegalMoves > 0)
{
--numLegalMoves; // We're trying a move; if this one fails, it's actually not legal
final int n = distribution.sampleFromDistribution();
final Move move = maybeLegalMoves.get(n);
if (isMoveReallyLegal.checkMove(move))
return move; // Only return this move if it's really legal
else
distribution.updateSoftmaxInvalidate(n); // Incrementally update the softmax, move n is invalid
}
// No legal moves?
return null;
}
//-------------------------------------------------------------------------
}
| 3,074 | 26.954545 | 113 | java |
Ludii | Ludii-master/AI/src/playout_move_selectors/LogitTreeMoveSelector.java | package playout_move_selectors;
import decision_trees.logits.LogitTreeNode;
import features.FeatureVector;
import features.feature_sets.BaseFeatureSet;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
/**
* PlayoutMoveSelector for playouts which uses a softmax over actions with logits
* computed by feature regression trees.
*
* @author Dennis Soemers
*/
public class LogitTreeMoveSelector extends PlayoutMoveSelector
{
//-------------------------------------------------------------------------
/** Feature sets (one per player, or just a shared one at index 0) */
protected final BaseFeatureSet[] featureSets;
/** Regression tree root nodes (one per player, or just a shared one at index 0) */
protected final LogitTreeNode[] rootNodes;
/** Do we want to play greedily? */
protected final boolean greedy;
/** Temperature for the distribution */
protected final double temperature;
//-------------------------------------------------------------------------
/**
* Constructor
* @param featureSets Feature sets (one per player, or just a shared one at index 0)
* @param rootNodes Regression tree root nodes (one per player, or just a shared one at index 0)
* @param greedy Do we want to play greedily?
* @param temperature
*/
public LogitTreeMoveSelector
(
final BaseFeatureSet[] featureSets,
final LogitTreeNode[] rootNodes,
final boolean greedy,
final double temperature
)
{
this.featureSets = featureSets;
this.rootNodes = rootNodes;
this.greedy = greedy;
this.temperature = temperature;
}
//-------------------------------------------------------------------------
@Override
public Move selectMove
(
final Context context,
final FastArrayList<Move> maybeLegalMoves,
final int p,
final IsMoveReallyLegal isMoveReallyLegal
)
{
final BaseFeatureSet featureSet;
final LogitTreeNode rootNode;
if (featureSets.length == 1)
{
featureSet = featureSets[0];
rootNode = rootNodes[0];
}
else
{
featureSet = featureSets[p];
rootNode = rootNodes[p];
}
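// The regression tree predicts one logit per candidate move; a softmax (with
// the configured temperature) over these logits gives the sampling distribution.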
final FeatureVector[] featureVectors = featureSet.computeFeatureVectors(context, maybeLegalMoves, false);
final float[] logits = new float[featureVectors.length];
for (int i = 0; i < featureVectors.length; ++i)
{
logits[i] = rootNode.predict(featureVectors[i]);
}
final FVector distribution = FVector.wrap(logits);
distribution.softmax(temperature);
int numLegalMoves = maybeLegalMoves.size();
while (numLegalMoves > 0)
{
--numLegalMoves; // We're trying a move; if this one fails, it's actually not legal
final int n = greedy ? distribution.argMaxRand() : distribution.sampleFromDistribution();
final Move move = maybeLegalMoves.get(n);
if (isMoveReallyLegal.checkMove(move))
return move; // Only return this move if it's really legal
else
distribution.updateSoftmaxInvalidate(n); // Incrementally update the softmax, move n is invalid
}
// No legal moves?
return null;
}
//-------------------------------------------------------------------------
}
| 3,205 | 26.637931 | 107 | java |
Ludii | Ludii-master/AI/src/policies/GreedyPolicy.java | package policies;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import features.FeatureVector;
import features.WeightVector;
import features.feature_sets.BaseFeatureSet;
import features.feature_sets.network.JITSPatterNetFeatureSet;
import function_approx.BoostedLinearFunction;
import function_approx.LinearFunction;
import game.Game;
import game.rules.play.moves.Moves;
import gnu.trove.list.array.TIntArrayList;
import main.Constants;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.trial.Trial;
import playout_move_selectors.FeaturesSoftmaxMoveSelector;
import search.mcts.MCTS;
import utils.ExperimentFileUtils;
/**
* A greedy policy (plays greedily according to estimates by a linear function
* approximator).
*
* @author Dennis Soemers and cambolbro
*/
public class GreedyPolicy extends Policy
{
//-------------------------------------------------------------------------
/**
* Linear function approximators (can output one logit per action)
*
* If it contains only one function, it will be shared across all
* players. Otherwise, it will contain one function per player.
*/
protected LinearFunction[] linearFunctions;
/**
* Feature Sets to use to generate feature vectors for state+action pairs.
*
* If it contains only one feature set, it will be shared across all
* players. Otherwise, it will contain one Feature Set per player.
*/
protected BaseFeatureSet[] featureSets;
/** Auto-end playouts in a draw if they take more turns than this */
protected int playoutTurnLimit = 200;
//-------------------------------------------------------------------------
/**
* Default constructor. Will initialise important parts to null and break
* down if used directly. Should customise() it first!
*/
public GreedyPolicy()
{
linearFunctions = null;
featureSets = null;
}
/**
* Constructs a greedy policy with linear function approximators
* @param linearFunctions
* @param featureSets
*/
public GreedyPolicy
(
final LinearFunction[] linearFunctions,
final BaseFeatureSet[] featureSets
)
{
this.linearFunctions = linearFunctions;
this.featureSets = featureSets;
}
//-------------------------------------------------------------------------
@Override
public FVector computeDistribution
(
final Context context,
final FastArrayList<Move> actions,
final boolean thresholded
)
{
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
{
featureSet = featureSets[0];
}
else
{
featureSet = featureSets[context.state().mover()];
}
return computeDistribution
(
featureSet.computeFeatureVectors(context, actions, thresholded),
context.state().mover()
);
}
/**
* @param featureVectors
* @param player
* @return Logits for the actions implied by a list of feature vectors.
*/
public float[] computeLogits
(
final FeatureVector[] featureVectors,
final int player
)
{
final float[] logits = new float[featureVectors.length];
final LinearFunction linearFunction;
if (linearFunctions.length == 1)
{
linearFunction = linearFunctions[0];
}
else
{
linearFunction = linearFunctions[player];
}
for (int i = 0; i < featureVectors.length; ++i)
{
logits[i] = linearFunction.predict(featureVectors[i]);
}
return logits;
}
@Override
public float computeLogit(final Context context, final Move move)
{
final LinearFunction linearFunction;
if (linearFunctions.length == 1)
linearFunction = linearFunctions[0];
else
linearFunction = linearFunctions[context.state().mover()];
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
featureSet = featureSets[0];
else
featureSet = featureSets[context.state().mover()];
return linearFunction.predict(featureSet.computeFeatureVector(context, move, true));
}
/**
* @param featureVectors One feature vector per action
* @param player Player for which to use features
*
* @return Probability distribution over actions implied by a list of sparse
* feature vectors
*/
public FVector computeDistribution
(
final FeatureVector[] featureVectors,
final int player
)
{
final float[] logits = computeLogits(featureVectors, player);
float maxLogit = Float.NEGATIVE_INFINITY;
final TIntArrayList maxLogitIndices = new TIntArrayList();
for (int i = 0; i < logits.length; ++i)
{
final float logit = logits[i];
if (logit > maxLogit)
{
maxLogit = logit;
maxLogitIndices.reset();
maxLogitIndices.add(i);
}
else if (logit == maxLogit)
{
maxLogitIndices.add(i);
}
}
// this is the probability we assign to all max logits
final float maxProb = 1.f / maxLogitIndices.size();
// now create the distribution
final FVector distribution = new FVector(logits.length);
for (int i = 0; i < maxLogitIndices.size(); ++i)
{
distribution.set(maxLogitIndices.getQuick(i), maxProb);
}
return distribution;
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
final WeightVector[] params = new WeightVector[linearFunctions.length];
for (int i = 0; i < linearFunctions.length; ++i)
{
if (linearFunctions[i] == null)
{
params[i] = null;
}
else
{
params[i] = linearFunctions[i].effectiveParams();
}
}
return context.game().playout
(
context,
null,
1.0,
new FeaturesSoftmaxMoveSelector(featureSets, params, true),
-1,
playoutTurnLimit,
ThreadLocalRandom.current()
);
}
@Override
public boolean playoutSupportsGame(final Game game)
{
return supportsGame(game);
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
final List<String> policyWeightsFilepaths = new ArrayList<String>();
boolean boosted = false;
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.toLowerCase().startsWith("policyweights="))
{
if (policyWeightsFilepaths.size() > 0)
policyWeightsFilepaths.clear();
policyWeightsFilepaths.add(input.substring("policyweights=".length()));
}
else if (input.toLowerCase().startsWith("policyweights"))
{
for (int p = 1; p <= Constants.MAX_PLAYERS; ++p)
{
if (input.toLowerCase().startsWith("policyweights" + p + "="))
{
while (policyWeightsFilepaths.size() <= p)
{
policyWeightsFilepaths.add(null);
}
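// "policyweightsX=" and "policyweightsXX=" are used only for their lengths:
// they match the length of "policyweights<p>=" for one-digit and two-digit
// player numbers respectively.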
if (p < 10)
policyWeightsFilepaths.set(p, input.substring("policyweightsX=".length()));
else // Doubt we'll ever have more than 99 players
policyWeightsFilepaths.set(p, input.substring("policyweightsXX=".length()));
}
}
}
else if (input.toLowerCase().startsWith("playoutturnlimit="))
{
playoutTurnLimit =
Integer.parseInt(
input.substring("playoutturnlimit=".length()));
}
else if (input.toLowerCase().startsWith("friendly_name="))
{
friendlyName =
input.substring("friendly_name=".length());
}
else if (input.toLowerCase().startsWith("boosted="))
{
if (input.toLowerCase().endsWith("true"))
{
boosted = true;
}
}
}
if (!policyWeightsFilepaths.isEmpty())
{
this.linearFunctions = new LinearFunction[policyWeightsFilepaths.size()];
this.featureSets = new BaseFeatureSet[linearFunctions.length];
for (int i = 0; i < policyWeightsFilepaths.size(); ++i)
{
String policyWeightsFilepath = policyWeightsFilepaths.get(i);
if (policyWeightsFilepath != null)
{
final String parentDir = new File(policyWeightsFilepath).getParent();
if (!new File(policyWeightsFilepath).exists())
{
// Replace with whatever is the latest file we have
if (policyWeightsFilepath.contains("Selection"))
{
policyWeightsFilepath =
ExperimentFileUtils.getLastFilepath(parentDir + "/PolicyWeightsSelection_P" + i, "txt");
}
else if (policyWeightsFilepath.contains("Playout"))
{
policyWeightsFilepath =
ExperimentFileUtils.getLastFilepath(parentDir + "/PolicyWeightsPlayout_P" + i, "txt");
}
else if (policyWeightsFilepath.contains("TSPG"))
{
policyWeightsFilepath =
ExperimentFileUtils.getLastFilepath(parentDir + "/PolicyWeightsTSPG_P" + i, "txt");
}
else
{
policyWeightsFilepath = null;
}
}
if (boosted)
linearFunctions[i] = BoostedLinearFunction.boostedFromFile(policyWeightsFilepath, null);
else
linearFunctions[i] = LinearFunction.fromFile(policyWeightsFilepath);
featureSets[i] = JITSPatterNetFeatureSet.construct(parentDir + File.separator + linearFunctions[i].featureSetFile());
}
}
}
else
{
System.err.println("Cannot construct Greedy Policy from: " + Arrays.toString(inputs));
}
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final Moves actions = game.moves(context);
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
{
featureSet = featureSets[0];
}
else
{
featureSet = featureSets[context.state().mover()];
}
return actions.moves().get(FVector.wrap
(
computeLogits
(
featureSet.computeFeatureVectors
(
context,
actions.moves(),
true
),
context.state().mover()
)
).argMaxRand());
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return A greedy policy constructed from a given array of input lines
*/
public static GreedyPolicy fromLines(final String[] lines)
{
final GreedyPolicy policy = new GreedyPolicy();
policy.customise(lines);
return policy;
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
if (featureSets.length == 1)
{
final int[] supportedPlayers = new int[game.players().count()];
for (int i = 0; i < supportedPlayers.length; ++i)
{
supportedPlayers[i] = i + 1;
}
featureSets[0].init(game, supportedPlayers, linearFunctions[0].effectiveParams());
}
else
{
for (int i = 1; i < featureSets.length; ++i)
{
featureSets[i].init(game, new int[] {i}, linearFunctions[i].effectiveParams());
}
}
}
//-------------------------------------------------------------------------
}
| 10,937 | 23.915718 | 122 | java |
Ludii | Ludii-master/AI/src/policies/Policy.java | package policies;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.AI;
import other.context.Context;
import other.move.Move;
import search.mcts.playout.PlayoutStrategy;
/**
* A policy is something that can compute distributions over actions in a given
* state (presumably using some form of function approximation).
*
* Policies should also implement the methods required to function as
* Play-out strategies for MCTS or function as a full AI agent.
*
* @author Dennis Soemers
*/
public abstract class Policy extends AI implements PlayoutStrategy
{
//-------------------------------------------------------------------------
/**
* @param context
* @param actions
* @param thresholded
* @return Probability distribution over the given list of actions in the given state.
*/
public abstract FVector computeDistribution
(
final Context context,
final FastArrayList<Move> actions,
final boolean thresholded
);
//-------------------------------------------------------------------------
/**
* @param context
* @param move
* @return Logit for a single move in a single state
*/
public abstract float computeLogit(final Context context, final Move move);
//-------------------------------------------------------------------------
}
| 1,320 | 25.959184 | 87 | java |
Ludii | Ludii-master/AI/src/policies/ProportionalPolicyClassificationTree.java | package policies;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import decision_trees.classifiers.DecisionTreeNode;
import features.Feature;
import features.FeatureVector;
import features.aspatial.AspatialFeature;
import features.feature_sets.BaseFeatureSet;
import features.feature_sets.network.JITSPatterNetFeatureSet;
import features.spatial.SpatialFeature;
import game.Game;
import game.rules.play.moves.Moves;
import game.types.play.RoleType;
import main.FileHandling;
import main.collections.FVector;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.features.trees.FeatureTrees;
import metadata.ai.features.trees.classifiers.DecisionTree;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
import other.trial.Trial;
import playout_move_selectors.DecisionTreeMoveSelector;
import playout_move_selectors.EpsilonGreedyWrapper;
import search.mcts.MCTS;
/**
* A policy that uses a Classification Tree to compute probabilities per move.
*
* @author Dennis Soemers
*/
public class ProportionalPolicyClassificationTree extends Policy
{
//-------------------------------------------------------------------------
/**
* Roots of decision trees that can output probability estimates (one per legal move).
*
* If it contains only one root, it will be shared across all
* players. Otherwise, it will contain one root per player.
*/
protected DecisionTreeNode[] decisionTreeRoots;
/**
* Feature Sets to use to generate feature vectors for state+action pairs.
*
* If it contains only one feature set, it will be shared across all
* players. Otherwise, it will contain one Feature Set per player.
*/
protected BaseFeatureSet[] featureSets;
/**
* If >= 0, we'll only actually use this policy in MCTS play-outs
* for up to this many actions. If a play-out still did not terminate
* after this many play-out actions, we revert to a random play-out
* strategy as fallback
*/
protected int playoutActionLimit = -1;
/** Auto-end playouts in a draw if they take more turns than this */
protected int playoutTurnLimit = -1;
/** Epsilon for epsilon-greedy playouts */
protected double epsilon = 0.0;
/** If true, we play greedily instead of sampling proportional to probabilities */
protected boolean greedy = false;
//-------------------------------------------------------------------------
/**
* Default constructor. Will initialise important parts to null and break
* down if used directly. Should customise() it first!
*/
public ProportionalPolicyClassificationTree()
{
decisionTreeRoots = null;
featureSets = null;
}
/**
* Constructs a policy with classification tree(s) for probabilities
* @param decisionTreeRoots
* @param featureSets
*/
public ProportionalPolicyClassificationTree
(
final DecisionTreeNode[] decisionTreeRoots,
final BaseFeatureSet[] featureSets
)
{
this.decisionTreeRoots = decisionTreeRoots;
this.featureSets = Arrays.copyOf(featureSets, featureSets.length);
}
/**
* Constructs a policy with classification tree(s) for probabilities,
* and a limit on the number of play-out actions to run with this policy
* plus a fallback Play-out strategy to use afterwards.
*
* @param decisionTreeRoots
* @param featureSets
* @param playoutActionLimit
*/
public ProportionalPolicyClassificationTree
(
final DecisionTreeNode[] decisionTreeRoots,
final BaseFeatureSet[] featureSets,
final int playoutActionLimit
)
{
this.decisionTreeRoots = decisionTreeRoots;
this.featureSets = Arrays.copyOf(featureSets, featureSets.length);
this.playoutActionLimit = playoutActionLimit;
}
/**
* Constructs a policy from a given set of feature trees as created
* by the compiler, using classification trees
*
* @param featureTrees
* @param epsilon Epsilon for epsilon-greedy playouts
* @return Constructed policy
*/
public static ProportionalPolicyClassificationTree constructPolicy(final FeatureTrees featureTrees, final double epsilon)
{
final ProportionalPolicyClassificationTree softmax = new ProportionalPolicyClassificationTree();
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<DecisionTreeNode> roots = new ArrayList<DecisionTreeNode>();
for (final DecisionTree classificationTree : featureTrees.decisionTrees())
{
if (classificationTree.role() == RoleType.Shared || classificationTree.role() == RoleType.Neutral)
addFeatureSetRoot(0, classificationTree.root(), featureSetsList, roots);
else
addFeatureSetRoot(classificationTree.role().owner(), classificationTree.root(), featureSetsList, roots);
}
softmax.featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
softmax.decisionTreeRoots = roots.toArray(new DecisionTreeNode[roots.size()]);
softmax.epsilon = epsilon;
return softmax;
}
//-------------------------------------------------------------------------
@Override
public FVector computeDistribution
(
final Context context,
final FastArrayList<Move> actions,
final boolean thresholded
)
{
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
featureSet = featureSets[0];
else
featureSet = featureSets[context.state().mover()];
return computeDistribution(featureSet.computeFeatureVectors(context, actions, thresholded), context.state().mover());
}
/**
* @param featureVectors
* @param player
* @return Probability distribution over actions implied by a list of sparse
* feature vectors
*/
public FVector computeDistribution
(
final FeatureVector[] featureVectors,
final int player
)
{
// The classification tree outputs one unnormalised probability estimate per
// move; normalising these estimates yields the returned distribution.
final float[] unnormalisedProbs = new float[featureVectors.length];
final DecisionTreeNode decisionTreeRoot;
if (decisionTreeRoots.length == 1)
decisionTreeRoot = decisionTreeRoots[0];
else
decisionTreeRoot = decisionTreeRoots[player];
for (int i = 0; i < featureVectors.length; ++i)
{
unnormalisedProbs[i] = decisionTreeRoot.predict(featureVectors[i]);
}
final FVector distribution = FVector.wrap(unnormalisedProbs);
distribution.normalise();
return distribution;
}
@Override
public float computeLogit(final Context context, final Move move)
{
final DecisionTreeNode decisionTreeRoot;
if (decisionTreeRoots.length == 1)
decisionTreeRoot = decisionTreeRoots[0];
else
decisionTreeRoot = decisionTreeRoots[context.state().mover()];
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
featureSet = featureSets[0];
else
featureSet = featureSets[context.state().mover()];
return decisionTreeRoot.predict(featureSet.computeFeatureVector(context, move, true));
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
final PlayoutMoveSelector playoutMoveSelector;
if (epsilon < 1.0)
{
if (epsilon <= 0.0)
playoutMoveSelector = new DecisionTreeMoveSelector(featureSets, decisionTreeRoots, greedy);
else
playoutMoveSelector = new EpsilonGreedyWrapper(new DecisionTreeMoveSelector(featureSets, decisionTreeRoots, greedy), epsilon);
}
else
{
playoutMoveSelector = null;
}
return context.game().playout
(
context,
null,
1.0,
playoutMoveSelector,
playoutActionLimit,
playoutTurnLimit,
ThreadLocalRandom.current()
);
}
@Override
public boolean playoutSupportsGame(final Game game)
{
return supportsGame(game);
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
String policyTreesFilepath = null;
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.toLowerCase().startsWith("policytrees="))
{
policyTreesFilepath = input.substring("policytrees=".length());
}
else if (input.toLowerCase().startsWith("playoutactionlimit="))
{
playoutActionLimit =
Integer.parseInt(input.substring(
"playoutactionlimit=".length()));
}
else if (input.toLowerCase().startsWith("playoutturnlimit="))
{
playoutTurnLimit =
Integer.parseInt
(
input.substring("playoutturnlimit=".length())
);
}
else if (input.toLowerCase().startsWith("friendly_name="))
{
friendlyName = input.substring("friendly_name=".length());
}
else if (input.toLowerCase().startsWith("epsilon="))
{
epsilon = Double.parseDouble(input.substring("epsilon=".length()));
}
else if (input.toLowerCase().startsWith("greedy="))
{
greedy = Boolean.parseBoolean(input.substring("greedy=".length()));
}
}
if (policyTreesFilepath != null)
{
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<DecisionTreeNode> roots = new ArrayList<DecisionTreeNode>();
try
{
final String featureTreesString = FileHandling.loadTextContentsFromFile(policyTreesFilepath);
final FeatureTrees featureTrees =
(FeatureTrees)compiler.Compiler.compileObject
(
featureTreesString,
"metadata.ai.features.trees.FeatureTrees",
new Report()
);
for (final DecisionTree decisionTree : featureTrees.decisionTrees())
{
if (decisionTree.role() == RoleType.Shared || decisionTree.role() == RoleType.Neutral)
addFeatureSetRoot(0, decisionTree.root(), featureSetsList, roots);
else
addFeatureSetRoot(decisionTree.role().owner(), decisionTree.root(), featureSetsList, roots);
}
this.featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
this.decisionTreeRoots = roots.toArray(new DecisionTreeNode[roots.size()]);
}
catch (final IOException e)
{
e.printStackTrace();
}
}
else
{
System.err.println("Cannot construct Proportional Policy Classification Tree from: " + Arrays.toString(inputs));
}
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final Moves actions = game.moves(context);
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
{
featureSet = featureSets[0];
}
else
{
featureSet = featureSets[context.state().mover()];
}
final FVector distribution =
computeDistribution
(
featureSet.computeFeatureVectors
(
context,
actions.moves(),
true
),
context.state().mover()
);
if (greedy)
{
return actions.moves().get(distribution.argMaxRand());
}
else
{
return actions.moves().get(distribution.sampleFromDistribution());
}
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
if (featureSets.length == 1)
{
final int[] supportedPlayers = new int[game.players().count()];
for (int i = 0; i < supportedPlayers.length; ++i)
{
supportedPlayers[i] = i + 1;
}
featureSets[0].init(game, supportedPlayers, null);
}
else
{
for (int i = 1; i < featureSets.length; ++i)
{
featureSets[i].init(game, new int[] {i}, null);
}
}
}
@Override
public void closeAI()
{
if (featureSets == null)
return;
if (featureSets.length == 1)
{
featureSets[0].closeCache();
}
else
{
for (int i = 1; i < featureSets.length; ++i)
{
featureSets[i].closeCache();
}
}
}
//-------------------------------------------------------------------------
/**
* @return Feature Sets used by this policy
*/
public BaseFeatureSet[] featureSets()
{
return featureSets;
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return A classification tree policy constructed from a given array of input lines
*/
public static ProportionalPolicyClassificationTree fromLines(final String[] lines)
{
ProportionalPolicyClassificationTree policy = null;
// for (final String line : lines)
// {
// if (line.equalsIgnoreCase("features=from_metadata"))
// {
// policy = new SoftmaxFromMetadataSelection(0.0);
// break;
// }
// }
//
// if (policy == null)
policy = new ProportionalPolicyClassificationTree();
policy.customise(lines);
return policy;
}
//-------------------------------------------------------------------------
/**
* Helper method that adds a Feature Set and a Decision Tree Root for the
* given player index
*
* @param playerIdx
* @param rootNode
* @param outFeatureSets
* @param outRoots
*/
protected static void addFeatureSetRoot
(
final int playerIdx,
final metadata.ai.features.trees.classifiers.DecisionTreeNode rootNode,
final List<BaseFeatureSet> outFeatureSets,
final List<DecisionTreeNode> outRoots
)
{
while (outFeatureSets.size() <= playerIdx)
{
outFeatureSets.add(null);
}
while (outRoots.size() <= playerIdx)
{
outRoots.add(null);
}
final List<AspatialFeature> aspatialFeatures = new ArrayList<AspatialFeature>();
final List<SpatialFeature> spatialFeatures = new ArrayList<SpatialFeature>();
final Set<String> featureStrings = new HashSet<String>();
rootNode.collectFeatureStrings(featureStrings);
for (final String featureString : featureStrings)
{
final Feature feature = Feature.fromString(featureString);
if (feature instanceof AspatialFeature)
aspatialFeatures.add((AspatialFeature)feature);
else
spatialFeatures.add((SpatialFeature)feature);
}
final BaseFeatureSet featureSet = JITSPatterNetFeatureSet.construct(aspatialFeatures, spatialFeatures);
outFeatureSets.set(playerIdx, featureSet);
outRoots.set(playerIdx, DecisionTreeNode.fromMetadataNode(rootNode, featureSet));
}
//-------------------------------------------------------------------------
}
| 14,188 | 26.026667 | 130 | java |
Ludii | Ludii-master/AI/src/policies/softmax/SoftmaxFromMetadataPlayout.java | package policies.softmax;
import java.util.ArrayList;
import java.util.List;
import features.feature_sets.BaseFeatureSet;
import function_approx.LinearFunction;
import game.Game;
import game.types.play.RoleType;
import main.collections.FVector;
import main.collections.FastArrayList;
import metadata.ai.features.Features;
import metadata.ai.features.trees.FeatureTrees;
import other.context.Context;
import other.move.Move;
import other.trial.Trial;
import search.mcts.MCTS;
/**
* A Softmax Policy that can automatically initialise itself by
* using the Playout features embedded in a game's metadata.
*
* @author Dennis Soemers
*/
public final class SoftmaxFromMetadataPlayout extends SoftmaxPolicy
{
//-------------------------------------------------------------------------
/** Softmax policy we wrap around; can change into a linear or decision tree based policy depending on metadata */
private SoftmaxPolicy wrappedSoftmax = null;
//-------------------------------------------------------------------------
/**
* Constructor
* @param epsilon Epsilon for epsilon-greedy feature-based playouts. 1 for uniform, 0 for always softmax
*/
public SoftmaxFromMetadataPlayout(final double epsilon)
{
friendlyName = "Softmax Policy (Playout features from Game metadata)";
this.epsilon = epsilon;
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
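// Weight-based Features metadata takes precedence; if it is absent we fall
// back to trained feature trees (logit regression trees) from the metadata.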
final Features featuresMetadata = game.metadata().ai().features();
if (featuresMetadata != null)
{
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<LinearFunction> linFuncs = new ArrayList<LinearFunction>();
wrappedSoftmax = new SoftmaxPolicyLinear();
wrappedSoftmax.epsilon = epsilon;
wrappedSoftmax.playoutActionLimit = 200;
for (final metadata.ai.features.FeatureSet featureSet : featuresMetadata.featureSets())
{
if (featureSet.role() == RoleType.Shared)
((SoftmaxPolicyLinear)wrappedSoftmax).addFeatureSetWeights(0, featureSet.featureStrings(), featureSet.playoutWeights(), featureSetsList, linFuncs);
else
((SoftmaxPolicyLinear)wrappedSoftmax).addFeatureSetWeights(featureSet.role().owner(), featureSet.featureStrings(), featureSet.playoutWeights(), featureSetsList, linFuncs);
}
((SoftmaxPolicyLinear) wrappedSoftmax).featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
((SoftmaxPolicyLinear) wrappedSoftmax).linearFunctions = linFuncs.toArray(new LinearFunction[linFuncs.size()]);
}
else
{
// TODO no distinction between selection and playout here
final FeatureTrees featureTrees = game.metadata().ai().trainedFeatureTrees();
wrappedSoftmax = SoftmaxPolicyLogitTree.constructPolicy(featureTrees, epsilon);
wrappedSoftmax.playoutActionLimit = 200;
}
wrappedSoftmax.initAI(game, playerID);
super.initAI(game, playerID);
}
@Override
public boolean supportsGame(final Game game)
{
// We support any game with appropriate features in metadata
if (game.metadata().ai() != null)
{
if (game.metadata().ai().features() != null)
{
final Features featuresMetadata = game.metadata().ai().features();
if (featuresMetadata.featureSets().length == 1 && featuresMetadata.featureSets()[0].role() == RoleType.Shared)
return true;
else
return (featuresMetadata.featureSets().length == game.players().count());
}
else if (game.metadata().ai().trainedFeatureTrees() != null)
{
return true;
}
else
{
return false;
}
}
return false;
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
return wrappedSoftmax.runPlayout(mcts, context);
}
@Override
public boolean playoutSupportsGame(final Game game)
{
return supportsGame(game);
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
System.err.println("customise() not implemented for SoftmaxFromMetadataPlayout!");
}
@Override
public float computeLogit(final Context context, final Move move)
{
return wrappedSoftmax.computeLogit(context, move);
}
@Override
public FVector computeDistribution(final Context context, final FastArrayList<Move> actions, final boolean thresholded)
{
return wrappedSoftmax.computeDistribution(context, actions, thresholded);
}
@Override
public Move selectAction(final Game game, final Context context, final double maxSeconds, final int maxIterations, final int maxDepth)
{
return wrappedSoftmax.selectAction(game, context, maxSeconds, maxIterations, maxDepth);
}
}
| 4,771 | 29.787097 | 176 | java |
Ludii | Ludii-master/AI/src/policies/softmax/SoftmaxFromMetadataSelection.java | package policies.softmax;
import java.util.ArrayList;
import java.util.List;
import features.feature_sets.BaseFeatureSet;
import function_approx.LinearFunction;
import game.Game;
import game.types.play.RoleType;
import main.collections.FVector;
import main.collections.FastArrayList;
import metadata.ai.features.Features;
import metadata.ai.features.trees.FeatureTrees;
import other.context.Context;
import other.move.Move;
import other.trial.Trial;
import search.mcts.MCTS;
/**
* A Softmax Policy that can automatically initialise itself by
* using the Selection features embedded in a game's metadata.
*
* @author Dennis Soemers
*/
public class SoftmaxFromMetadataSelection extends SoftmaxPolicy
{
//-------------------------------------------------------------------------
/** Softmax policy we wrap around; can change into a linear or decision tree based policy depending on metadata */
private SoftmaxPolicy wrappedSoftmax = null;
//-------------------------------------------------------------------------
/**
* Constructor
* @param epsilon Epsilon for epsilon-greedy feature-based playouts. 1 for uniform, 0 for always softmax
*/
public SoftmaxFromMetadataSelection(final double epsilon)
{
friendlyName = "Softmax Policy (Selection features from Game metadata)";
this.epsilon = epsilon;
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
try
{
final Features featuresMetadata = game.metadata().ai().features();
if (featuresMetadata != null)
{
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<LinearFunction> linFuncs = new ArrayList<LinearFunction>();
wrappedSoftmax = new SoftmaxPolicyLinear();
wrappedSoftmax.epsilon = epsilon;
wrappedSoftmax.playoutActionLimit = 200;
for (final metadata.ai.features.FeatureSet featureSet : featuresMetadata.featureSets())
{
if (featureSet.role() == RoleType.Shared)
((SoftmaxPolicyLinear)wrappedSoftmax).addFeatureSetWeights(0, featureSet.featureStrings(), featureSet.selectionWeights(), featureSetsList, linFuncs);
else
((SoftmaxPolicyLinear)wrappedSoftmax).addFeatureSetWeights(featureSet.role().owner(), featureSet.featureStrings(), featureSet.selectionWeights(), featureSetsList, linFuncs);
}
((SoftmaxPolicyLinear) wrappedSoftmax).featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
((SoftmaxPolicyLinear) wrappedSoftmax).linearFunctions = linFuncs.toArray(new LinearFunction[linFuncs.size()]);
}
else
{
// TODO no distinction between selection and playout here
final FeatureTrees featureTrees = game.metadata().ai().trainedFeatureTrees();
wrappedSoftmax = SoftmaxPolicyLogitTree.constructPolicy(featureTrees, epsilon);
wrappedSoftmax.playoutActionLimit = 200;
}
wrappedSoftmax.initAI(game, playerID);
}
catch (final Exception e)
{
System.err.println("Game = " + game.name());
e.printStackTrace();
}
super.initAI(game, playerID);
}
@Override
public boolean supportsGame(final Game game)
{
// We support any game with appropriate features in metadata
if (game.metadata().ai() != null)
{
if (game.metadata().ai().features() != null)
{
final Features featuresMetadata = game.metadata().ai().features();
if (featuresMetadata.featureSets().length == 1 && featuresMetadata.featureSets()[0].role() == RoleType.Shared)
return true;
else
return (featuresMetadata.featureSets().length == game.players().count());
}
else if (game.metadata().ai().trainedFeatureTrees() != null)
{
return true;
}
else
{
return false;
}
}
return false;
}
//-------------------------------------------------------------------------
/**
* @return Our current wrapped softmax (linear or tree-based)
*/
public SoftmaxPolicy wrappedSoftmax()
{
return wrappedSoftmax;
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
return wrappedSoftmax.runPlayout(mcts, context);
}
@Override
public boolean playoutSupportsGame(final Game game)
{
return supportsGame(game);
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
System.err.println("customise() not implemented for SoftmaxFromMetadataSelection!");
}
@Override
public float computeLogit(final Context context, final Move move)
{
return wrappedSoftmax.computeLogit(context, move);
}
@Override
public FVector computeDistribution(final Context context, final FastArrayList<Move> actions, final boolean thresholded)
{
return wrappedSoftmax.computeDistribution(context, actions, thresholded);
}
@Override
public Move selectAction(final Game game, final Context context, final double maxSeconds, final int maxIterations, final int maxDepth)
{
return wrappedSoftmax.selectAction(game, context, maxSeconds, maxIterations, maxDepth);
}
}
| 5,160 | 28.491429 | 179 | java |
Ludii | Ludii-master/AI/src/policies/softmax/SoftmaxPolicy.java | package policies.softmax;
import policies.Policy;
/**
* Abstract class for softmax policies; policies that compute
* logits for moves, and then pass them through a softmax to
* obtain a probability distribution over moves.
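 * That is, p(move i) = exp(logit_i) / sum_j exp(logit_j) over the legal moves,
 * optionally mixed with uniform random move selection via an epsilon parameter.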
*
* @author Dennis Soemers
*/
public abstract class SoftmaxPolicy extends Policy
{
//-------------------------------------------------------------------------
/** Epsilon for epsilon-greedy playouts */
protected double epsilon = 0.0;
/**
* If >= 0, we'll only actually use this softmax policy in MCTS play-outs
* for up to this many actions. If a play-out still did not terminate
* after this many play-out actions, we revert to a random play-out
* strategy as fallback
*/
protected int playoutActionLimit = -1;
/** Auto-end playouts in a draw if they take more turns than this */
protected int playoutTurnLimit = -1;
//-------------------------------------------------------------------------
}
| 958 | 27.205882 | 76 | java |
Ludii | Ludii-master/AI/src/policies/softmax/SoftmaxPolicyLinear.java | package policies.softmax;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import features.Feature;
import features.FeatureVector;
import features.WeightVector;
import features.aspatial.AspatialFeature;
import features.feature_sets.BaseFeatureSet;
import features.feature_sets.BaseFeatureSet.FeatureSetImplementations;
import features.feature_sets.LegacyFeatureSet;
import features.feature_sets.NaiveFeatureSet;
import features.feature_sets.network.JITSPatterNetFeatureSet;
import features.feature_sets.network.SPatterNetFeatureSet;
import features.spatial.SpatialFeature;
import function_approx.BoostedLinearFunction;
import function_approx.LinearFunction;
import game.Game;
import game.rules.play.moves.Moves;
import game.types.play.RoleType;
import gnu.trove.list.array.TFloatArrayList;
import gnu.trove.list.array.TIntArrayList;
import main.Constants;
import main.FileHandling;
import main.collections.FVector;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.features.Features;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
import other.trial.Trial;
import playout_move_selectors.EpsilonGreedyWrapper;
import playout_move_selectors.FeaturesSoftmaxMoveSelector;
import search.mcts.MCTS;
import utils.ExperimentFileUtils;
/**
* A policy which:
* - Uses a linear function approximator to compute one logit per action.
* - Uses softmax to compute a probability distribution from those logits.
* - Selects actions according to the softmax distribution.
*
* We extend the AI abstract class, which means this policy can also be used as
* a full agent (even though that's not the primary intended use).
*
* Similarly, we implement the PlayoutStrategy interface, so the policy can also
* be plugged into MCTS directly as a playout strategy.
*
* @author Dennis Soemers
*/
public class SoftmaxPolicyLinear extends SoftmaxPolicy
{
//-------------------------------------------------------------------------
/**
* Linear function approximators (can output one logit per action)
*
* If it contains only one function, it will be shared across all
* players. Otherwise, it will contain one function per player.
*/
protected LinearFunction[] linearFunctions;
/**
* Feature Sets to use to generate feature vectors for state+action pairs.
*
* If it contains only one feature set, it will be shared across all
* players. Otherwise, it will contain one Feature Set per player.
*/
protected BaseFeatureSet[] featureSets;
/** Implementation to use for feature sets */
protected FeatureSetImplementations implementation = FeatureSetImplementations.JITSPATTERNET;
//-------------------------------------------------------------------------
/**
* Default constructor. Will initialise important parts to null and break
* down if used directly. Should customise() it first!
*/
public SoftmaxPolicyLinear()
{
linearFunctions = null;
featureSets = null;
}
/**
* Constructs a softmax policy with a linear function approximator
* @param linearFunctions
* @param featureSets
*/
public SoftmaxPolicyLinear
(
final LinearFunction[] linearFunctions,
final BaseFeatureSet[] featureSets
)
{
this.linearFunctions = linearFunctions;
this.featureSets = Arrays.copyOf(featureSets, featureSets.length);
}
/**
* Constructs a softmax policy with a linear function approximator,
* and a limit on the number of play-out actions to run with this policy
* plus a fallback Play-out strategy to use afterwards.
*
* @param linearFunctions
* @param featureSets
* @param playoutActionLimit
*/
public SoftmaxPolicyLinear
(
final LinearFunction[] linearFunctions,
final BaseFeatureSet[] featureSets,
final int playoutActionLimit
)
{
this.linearFunctions = linearFunctions;
this.featureSets = Arrays.copyOf(featureSets, featureSets.length);
this.playoutActionLimit = playoutActionLimit;
}
/**
* Constructs a softmax policy from a given set of features as created
* by the compiler, using the Selection weights.
*
* @param features
* @param epsilon Epsilon for epsilon-greedy playouts (should be irrelevant for Selection policy)
* @return Constructed softmax policy
*/
public static SoftmaxPolicyLinear constructSelectionPolicy(final Features features, final double epsilon)
{
final SoftmaxPolicyLinear softmax = new SoftmaxPolicyLinear();
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<LinearFunction> linFuncs = new ArrayList<LinearFunction>();
for (final metadata.ai.features.FeatureSet featureSet : features.featureSets())
{
if (featureSet.role() == RoleType.Shared || featureSet.role() == RoleType.Neutral)
softmax.addFeatureSetWeights(0, featureSet.featureStrings(), featureSet.selectionWeights(), featureSetsList, linFuncs);
else
softmax.addFeatureSetWeights(featureSet.role().owner(), featureSet.featureStrings(), featureSet.selectionWeights(), featureSetsList, linFuncs);
}
softmax.featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
softmax.linearFunctions = linFuncs.toArray(new LinearFunction[linFuncs.size()]);
softmax.epsilon = epsilon;
return softmax;
}
/**
* Constructs a softmax policy from a given set of features as created
* by the compiler, using the Playout weights.
*
* @param features
* @param epsilon Epsilon for epsilon-greedy playouts
* @return Constructed softmax policy
*/
public static SoftmaxPolicyLinear constructPlayoutPolicy(final Features features, final double epsilon)
{
final SoftmaxPolicyLinear softmax = new SoftmaxPolicyLinear();
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<LinearFunction> linFuncs = new ArrayList<LinearFunction>();
for (final metadata.ai.features.FeatureSet featureSet : features.featureSets())
{
if (featureSet.role() == RoleType.Shared || featureSet.role() == RoleType.Neutral)
softmax.addFeatureSetWeights(0, featureSet.featureStrings(), featureSet.playoutWeights(), featureSetsList, linFuncs);
else
softmax.addFeatureSetWeights(featureSet.role().owner(), featureSet.featureStrings(), featureSet.playoutWeights(), featureSetsList, linFuncs);
}
softmax.featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
softmax.linearFunctions = linFuncs.toArray(new LinearFunction[linFuncs.size()]);
softmax.epsilon = epsilon;
return softmax;
}
//-------------------------------------------------------------------------
@Override
public FVector computeDistribution
(
final Context context,
final FastArrayList<Move> actions,
final boolean thresholded
)
{
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
featureSet = featureSets[0];
else
featureSet = featureSets[context.state().mover()];
return computeDistribution(featureSet.computeFeatureVectors(context, actions, thresholded), context.state().mover());
}
@Override
public float computeLogit(final Context context, final Move move)
{
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
featureSet = featureSets[0];
else
featureSet = featureSets[context.state().mover()];
final LinearFunction linearFunction;
if (linearFunctions.length == 1)
linearFunction = linearFunctions[0];
else
linearFunction = linearFunctions[context.state().mover()];
return linearFunction.predict(featureSet.computeFeatureVector(context, move, true));
}
/**
* @param featureVectors
* @param player
* @return Probability distribution over actions implied by a list of sparse
* feature vectors
*/
public FVector computeDistribution
(
final FeatureVector[] featureVectors,
final int player
)
{
final float[] logits = new float[featureVectors.length];
final LinearFunction linearFunction;
if (linearFunctions.length == 1)
linearFunction = linearFunctions[0];
else
linearFunction = linearFunctions[player];
for (int i = 0; i < featureVectors.length; ++i)
{
logits[i] = linearFunction.predict(featureVectors[i]);
}
final FVector distribution = FVector.wrap(logits);
distribution.softmax();
return distribution;
}
/**
* @param errors Vector of errors in distributions
* @param featureVectors One feature vector for every element
* (action) in the distributions.
* @param player The player whose parameters we want to compute gradients of
* @return Vector of gradients of the loss function (assumed to be
* cross-entropy loss) with respect to our linear function's vector of
* parameters.
*/
public FVector computeParamGradients
(
final FVector errors,
final FeatureVector[] featureVectors,
final int player
)
{
final LinearFunction linearFunction;
if (linearFunctions.length == 1)
linearFunction = linearFunctions[0];
else
linearFunction = linearFunctions[player];
// now compute gradients w.r.t. parameters
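// For cross-entropy loss over a softmax policy (as assumed in the javadoc
// above), d(loss)/d(weight_f) = sum over actions of error(a) * value of
// feature f in action a's feature vector; the loops below accumulate exactly
// that (active spatial features are binary, so their value is implicitly 1).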
final FVector grads = new FVector(linearFunction.trainableParams().allWeights().dim());
final int numActions = errors.dim();
for (int i = 0; i < numActions; ++i)
{
// error for this action
final float error = errors.get(i);
// feature vector for this action
final FeatureVector featureVector = featureVectors[i];
// Feature values for aspatial features (dense representation); gradients for these first
final FVector aspatialFeatureValues = featureVector.aspatialFeatureValues();
final int numAspatialFeatures = aspatialFeatureValues.dim();
for (int j = 0; j < numAspatialFeatures; ++j)
{
grads.addToEntry(j, error * featureVector.aspatialFeatureValues().get(j));
}
// Sparse representation of active spatial features; gradients for these second,
// with offset for indexing based on number of aspatial features
final TIntArrayList activeSpatialFeatureIndices = featureVector.activeSpatialFeatureIndices();
for (int j = 0; j < activeSpatialFeatureIndices.size(); ++j)
{
final int featureIdx = activeSpatialFeatureIndices.getQuick(j);
grads.addToEntry(featureIdx + numAspatialFeatures, error);
}
}
//System.out.println("est. distr. = " + estimatedDistribution);
//System.out.println("tar. distr. = " + targetDistribution);
//System.out.println("errors = " + errors);
//System.out.println("grads = " + grads);
return grads;
}
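// Illustrative sketch of the gradient computed above (not part of the original
// API): for a single action with error e, two aspatial feature values [a0, a1]
// and active spatial feature indices {5, 7}, the loop amounts to
//
//   grads[0]     += e * a0;
//   grads[1]     += e * a1;
//   grads[2 + 5] += e;   // spatial entries offset by the number of aspatial features
//   grads[2 + 7] += e;
//
// i.e. the cross-entropy gradient w.r.t. a weight is the error times the
// corresponding feature value (1.0 for an active binary spatial feature).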
/**
* @param distribution
* @return Samples an action index from a previously-computed distribution
*/
@SuppressWarnings("static-method")
public int selectActionFromDistribution(final FVector distribution)
{
return distribution.sampleFromDistribution();
}
/**
* Updates this policy to use a new array of Feature Sets.
* For now, this method assumes that Feature Sets can only grow, and that newly-added
* features are always new spatial features.
*
* @param newFeatureSets
*/
public void updateFeatureSets(final BaseFeatureSet[] newFeatureSets)
{
for (int i = 0; i < linearFunctions.length; ++i)
{
if (newFeatureSets[i] != null)
{
final int numExtraFeatures = newFeatureSets[i].getNumSpatialFeatures() - featureSets[i].getNumSpatialFeatures();
for (int j = 0; j < numExtraFeatures; ++j)
{
linearFunctions[i].setTheta(new WeightVector(linearFunctions[i].trainableParams().allWeights().append(0.f)));
}
featureSets[i] = newFeatureSets[i];
}
else if (newFeatureSets[0] != null)
{
// Handle the case where all players have different functions but share the 0th feature set
final int numExtraFeatures = newFeatureSets[0].getNumSpatialFeatures() - featureSets[0].getNumSpatialFeatures();
for (int j = 0; j < numExtraFeatures; ++j)
{
linearFunctions[i].setTheta(new WeightVector(linearFunctions[i].trainableParams().allWeights().append(0.f)));
}
}
}
}
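// Illustrative usage sketch (the expanded feature set is hypothetical): a
// training loop that has grown the feature set for player 1 could update this
// policy without disturbing existing logits, because every newly added spatial
// feature receives an appended weight of 0:
//
//   final BaseFeatureSet[] grown = new BaseFeatureSet[policy.featureSets().length];
//   grown[1] = expandedFeatureSetPlayer1;   // other entries stay null and are left unchanged here
//   policy.updateFeatureSets(grown);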
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
final WeightVector[] params = new WeightVector[linearFunctions.length];
for (int i = 0; i < linearFunctions.length; ++i)
{
if (linearFunctions[i] == null)
{
params[i] = null;
}
else
{
params[i] = linearFunctions[i].effectiveParams();
}
}
final PlayoutMoveSelector playoutMoveSelector;
if (epsilon < 1.0)
{
if (epsilon <= 0.0)
playoutMoveSelector = new FeaturesSoftmaxMoveSelector(featureSets, params, true);
else
playoutMoveSelector = new EpsilonGreedyWrapper(new FeaturesSoftmaxMoveSelector(featureSets, params, true), epsilon);
}
else
{
playoutMoveSelector = null;
}
return context.game().playout
(
context,
null,
1.0,
playoutMoveSelector,
playoutActionLimit,
playoutTurnLimit,
ThreadLocalRandom.current()
);
}
@Override
public boolean playoutSupportsGame(final Game game)
{
return supportsGame(game);
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
final List<String> policyWeightsFilepaths = new ArrayList<String>();
Features featuresMetadata = null;
boolean boosted = false;
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.toLowerCase().startsWith("policyweights="))
{
if (policyWeightsFilepaths.size() > 0)
policyWeightsFilepaths.clear();
policyWeightsFilepaths.add(input.substring("policyweights=".length()));
}
else if (input.toLowerCase().startsWith("policyweights"))
{
for (int p = 1; p <= Constants.MAX_PLAYERS; ++p)
{
if (input.toLowerCase().startsWith("policyweights" + p + "="))
{
while (policyWeightsFilepaths.size() <= p)
{
policyWeightsFilepaths.add(null);
}
if (p < 10)
policyWeightsFilepaths.set(p, input.substring("policyweightsX=".length()));
else // Doubt we'll ever have more than 99 players
policyWeightsFilepaths.set(p, input.substring("policyweightsXX=".length()));
}
}
}
else if (input.toLowerCase().startsWith("featuresmetadata="))
{
final String featuresMetadataFilepath = input.substring("featuresmetadata=".length());
try
{
featuresMetadata =
(Features)compiler.Compiler.compileObject
(
FileHandling.loadTextContentsFromFile(featuresMetadataFilepath),
"metadata.ai.features.Features",
new Report()
);
}
catch (final IOException e)
{
e.printStackTrace();
}
}
else if (input.toLowerCase().startsWith("playoutactionlimit="))
{
playoutActionLimit =
Integer.parseInt(input.substring(
"playoutactionlimit=".length()));
}
else if (input.toLowerCase().startsWith("playoutturnlimit="))
{
playoutTurnLimit =
Integer.parseInt
(
input.substring("playoutturnlimit=".length())
);
}
else if (input.toLowerCase().startsWith("friendly_name="))
{
friendlyName = input.substring("friendly_name=".length());
}
else if (input.toLowerCase().startsWith("boosted="))
{
if (input.toLowerCase().endsWith("true"))
{
boosted = true;
}
}
else if (input.toLowerCase().startsWith("epsilon="))
{
epsilon = Double.parseDouble(input.substring("epsilon=".length()));
}
else if (input.toLowerCase().startsWith("implementation="))
{
implementation = FeatureSetImplementations.valueOf(input.substring("implementation=".length()).toUpperCase());
}
}
if (!policyWeightsFilepaths.isEmpty())
{
this.linearFunctions = new LinearFunction[policyWeightsFilepaths.size()];
this.featureSets = new BaseFeatureSet[linearFunctions.length];
for (int i = 0; i < policyWeightsFilepaths.size(); ++i)
{
String policyWeightsFilepath = policyWeightsFilepaths.get(i);
if (policyWeightsFilepath != null)
{
final String parentDir = new File(policyWeightsFilepath).getParent();
if (!new File(policyWeightsFilepath).exists())
{
// Replace with whatever is the latest file we have
if (policyWeightsFilepath.contains("Selection"))
{
policyWeightsFilepath =
ExperimentFileUtils.getLastFilepath(parentDir + "/PolicyWeightsSelection_P" + i, "txt");
}
else if (policyWeightsFilepath.contains("Playout"))
{
policyWeightsFilepath =
ExperimentFileUtils.getLastFilepath(parentDir + "/PolicyWeightsPlayout_P" + i, "txt");
}
else if (policyWeightsFilepath.contains("TSPG"))
{
policyWeightsFilepath =
ExperimentFileUtils.getLastFilepath(parentDir + "/PolicyWeightsTSPG_P" + i, "txt");
}
else if (policyWeightsFilepath.contains("PolicyWeightsCE"))
{
policyWeightsFilepath =
ExperimentFileUtils.getLastFilepath(parentDir + "/PolicyWeightsCE_P" + i, "txt");
}
else
{
policyWeightsFilepath = null;
}
}
if (policyWeightsFilepath == null)
{
System.err.println("Cannot resolve policy weights filepath: " + policyWeightsFilepaths.get(i));
continue;	// Skip this entry rather than crash on a null filepath below
}
if (boosted)
linearFunctions[i] = BoostedLinearFunction.boostedFromFile(policyWeightsFilepath, null);
else
linearFunctions[i] = LinearFunction.fromFile(policyWeightsFilepath);
final String featureSetFilepath = parentDir + File.separator + linearFunctions[i].featureSetFile();
final BaseFeatureSet featureSet;
switch (implementation)
{
case NAIVE:
featureSet = new NaiveFeatureSet(featureSetFilepath);
break;
case TREE:
featureSet = new LegacyFeatureSet(featureSetFilepath);
break;
case SPATTERNET:
featureSet = new SPatterNetFeatureSet(featureSetFilepath);
break;
case JITSPATTERNET:
featureSet = JITSPatterNetFeatureSet.construct(featureSetFilepath);
break;
default:
System.err.println("Unrecognised feature set implementation: " + implementation);
return;
}
featureSets[i] = featureSet;
}
}
}
else if (featuresMetadata != null)
{
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<LinearFunction> linFuncs = new ArrayList<LinearFunction>();
for (final metadata.ai.features.FeatureSet featureSet : featuresMetadata.featureSets())
{
if (featureSet.role() == RoleType.Shared || featureSet.role() == RoleType.Neutral)
addFeatureSetWeights(0, featureSet.featureStrings(), featureSet.selectionWeights(), featureSetsList, linFuncs);
else
addFeatureSetWeights(featureSet.role().owner(), featureSet.featureStrings(), featureSet.selectionWeights(), featureSetsList, linFuncs);
}
this.featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
this.linearFunctions = linFuncs.toArray(new LinearFunction[linFuncs.size()]);
}
else
{
System.err.println("Cannot construct linear Softmax Policy from: " + Arrays.toString(inputs));
}
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final Moves actions = game.moves(context);
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
{
featureSet = featureSets[0];
}
else
{
featureSet = featureSets[context.state().mover()];
}
return actions.moves().get
(
selectActionFromDistribution
(
computeDistribution
(
featureSet.computeFeatureVectors
(
context,
actions.moves(),
true
),
context.state().mover()
)
)
);
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
if (featureSets.length == 1)
{
final int[] supportedPlayers = new int[game.players().count()];
for (int i = 0; i < supportedPlayers.length; ++i)
{
supportedPlayers[i] = i + 1;
}
featureSets[0].init(game, supportedPlayers, linearFunctions[0].effectiveParams());
}
else
{
for (int i = 1; i < featureSets.length; ++i)
{
featureSets[i].init(game, new int[] {i}, linearFunctions[i].effectiveParams());
}
}
}
@Override
public void closeAI()
{
if (featureSets == null)
return;
if (featureSets.length == 1)
{
featureSets[0].closeCache();
}
else
{
for (int i = 1; i < featureSets.length; ++i)
{
featureSets[i].closeCache();
}
}
}
//-------------------------------------------------------------------------
/**
* @param player
* @return Linear function corresponding to given player
*/
public LinearFunction linearFunction(final int player)
{
if (linearFunctions.length == 1)
return linearFunctions[0];
else
return linearFunctions[player];
}
/**
* @return The linear functions used to compute logits
*/
public LinearFunction[] linearFunctions()
{
return linearFunctions;
}
/**
* @return Feature Sets used by this policy
*/
public BaseFeatureSet[] featureSets()
{
return featureSets;
}
//-------------------------------------------------------------------------
/**
* @return A metadata Features item describing the features + weights for this policy
*/
// public metadata.ai.features.Features generateFeaturesMetadata()
// {
// final Features features;
//
// if (featureSets.length == 1)
// {
// // Just a single featureset for all players
// final BaseFeatureSet featureSet = featureSets[0];
// final LinearFunction linFunc = linearFunctions[0];
// final Pair[] pairs = new Pair[featureSet.spatialFeatures().length];
//
// for (int i = 0; i < pairs.length; ++i)
// {
// final float weight = linFunc.effectiveParams().allWeights().get(i);
// pairs[i] = new Pair(featureSet.spatialFeatures()[i].toString(), Float.valueOf(weight));
//
// if (Float.isNaN(weight))
// System.err.println("WARNING: writing NaN weight");
// else if (Float.isInfinite(weight))
// System.err.println("WARNING: writing infinity weight");
// }
//
// features = new Features(new metadata.ai.features.FeatureSet(RoleType.Shared, pairs));
// }
// else
// {
// // One featureset per player
// final metadata.ai.features.FeatureSet[] metadataFeatureSets = new metadata.ai.features.FeatureSet[featureSets.length - 1];
//
// for (int p = 0; p < featureSets.length; ++p)
// {
// final BaseFeatureSet featureSet = featureSets[p];
// if (featureSet == null)
// continue;
//
// final LinearFunction linFunc = linearFunctions[p];
// final Pair[] pairs = new Pair[featureSet.spatialFeatures().length];
//
// for (int i = 0; i < pairs.length; ++i)
// {
// final float weight = linFunc.effectiveParams().allWeights().get(i);
// pairs[i] = new Pair(featureSet.spatialFeatures()[i].toString(), Float.valueOf(weight));
//
// if (Float.isNaN(weight))
// System.err.println("WARNING: writing NaN weight");
// else if (Float.isInfinite(weight))
// System.err.println("WARNING: writing infinity weight");
// }
//
// metadataFeatureSets[p - 1] = new metadata.ai.features.FeatureSet(RoleType.roleForPlayerId(p), pairs);
// }
//
// features = new Features(metadataFeatureSets);
// }
//
// return features;
// }
//-------------------------------------------------------------------------
/**
* @param lines
* @return A softmax policy constructed from a given array of input lines
*/
public static SoftmaxPolicy fromLines(final String[] lines)
{
SoftmaxPolicy policy = null;
for (final String line : lines)
{
if (line.equalsIgnoreCase("features=from_metadata"))
{
policy = new SoftmaxFromMetadataSelection(0.0);
break;
}
}
if (policy == null)
policy = new SoftmaxPolicyLinear();
policy.customise(lines);
return policy;
}
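// Illustrative sketch (the weights filepath is hypothetical): constructing and
// customising a policy from the same kind of string inputs that customise()
// accepts:
//
//   final SoftmaxPolicy policy = SoftmaxPolicyLinear.fromLines(new String[] {
//       "softmax",
//       "policyweights=/path/to/PolicyWeightsCE_P1_00001.txt",
//       "epsilon=0.1"
//   });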
/**
* @param weightsFile
* @return A Softmax policy constructed from a given file
*/
public static SoftmaxPolicyLinear fromFile(final File weightsFile)
{
final SoftmaxPolicyLinear policy = new SoftmaxPolicyLinear();
boolean boosted = false;
try (final BufferedReader reader = new BufferedReader(
new InputStreamReader(new FileInputStream(weightsFile.getAbsolutePath()), "UTF-8")))
{
String line = reader.readLine();
String lastLine = null;
while (line != null)
{
lastLine = line;
line = reader.readLine();
}
if (!lastLine.startsWith("FeatureSet="))
{
boosted = true;
}
}
catch (final IOException e)
{
e.printStackTrace();
}
policy.customise(new String[]{
"softmax",
"policyweights=" + weightsFile.getAbsolutePath(),
"boosted=" + boosted
});
return policy;
}
//-------------------------------------------------------------------------
/**
* Helper method that adds a Feature Set and a Linear Function for the
* given player index
*
* @param playerIdx
* @param featureStrings
* @param featureWeights
* @param outFeatureSets
* @param outLinFuncs
*/
protected void addFeatureSetWeights
(
final int playerIdx,
final String[] featureStrings,
final float[] featureWeights,
final List<BaseFeatureSet> outFeatureSets,
final List<LinearFunction> outLinFuncs
)
{
while (outFeatureSets.size() <= playerIdx)
{
outFeatureSets.add(null);
}
while (outLinFuncs.size() <= playerIdx)
{
outLinFuncs.add(null);
}
final List<AspatialFeature> aspatialFeatures = new ArrayList<AspatialFeature>();
final List<SpatialFeature> spatialFeatures = new ArrayList<SpatialFeature>();
final TFloatArrayList weights = new TFloatArrayList();
for (int i = 0; i < featureStrings.length; ++i)
{
final Feature feature = Feature.fromString(featureStrings[i]);
if (feature instanceof AspatialFeature)
aspatialFeatures.add((AspatialFeature)feature);
else
spatialFeatures.add((SpatialFeature)feature);
weights.add(featureWeights[i]);
}
final BaseFeatureSet featureSet;
switch (implementation)
{
case NAIVE:
featureSet = new NaiveFeatureSet(aspatialFeatures, spatialFeatures);
break;
case TREE:
featureSet = new LegacyFeatureSet(aspatialFeatures, spatialFeatures);
break;
case SPATTERNET:
featureSet = new SPatterNetFeatureSet(aspatialFeatures, spatialFeatures);
break;
case JITSPATTERNET:
featureSet = JITSPatterNetFeatureSet.construct(aspatialFeatures, spatialFeatures);
break;
default:
System.err.println("Unrecognised feature set implementation: " + implementation);
return;
}
outFeatureSets.set(playerIdx, featureSet);
outLinFuncs.set(playerIdx, new LinearFunction(new WeightVector(new FVector(weights.toArray()))));
}
//-------------------------------------------------------------------------
}
| 27,272 | 28.452484 | 147 | java |
Ludii | Ludii-master/AI/src/policies/softmax/SoftmaxPolicyLogitTree.java | package policies.softmax;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import decision_trees.logits.LogitTreeNode;
import features.Feature;
import features.FeatureVector;
import features.aspatial.AspatialFeature;
import features.feature_sets.BaseFeatureSet;
import features.feature_sets.network.JITSPatterNetFeatureSet;
import features.spatial.SpatialFeature;
import game.Game;
import game.rules.play.moves.Moves;
import game.types.play.RoleType;
import main.FileHandling;
import main.collections.FVector;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.features.trees.FeatureTrees;
import metadata.ai.features.trees.logits.LogitNode;
import metadata.ai.features.trees.logits.LogitTree;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
import other.trial.Trial;
import playout_move_selectors.EpsilonGreedyWrapper;
import playout_move_selectors.LogitTreeMoveSelector;
import search.mcts.MCTS;
/**
* A policy that uses a Logit (Regression) Tree to compute logits per move,
* and then a probability distribution over those moves using a softmax.
*
* @author Dennis Soemers
*/
public class SoftmaxPolicyLogitTree extends SoftmaxPolicy
{
//-------------------------------------------------------------------------
/**
* Roots of regression trees that can output logits (one per legal move).
*
* If it contains only one root, it will be shared across all
* players. Otherwise, it will contain one root per player.
*/
protected LogitTreeNode[] regressionTreeRoots;
/**
* Feature Sets to use to generate feature vectors for state+action pairs.
*
* If it contains only one feature set, it will be shared across all
* players. Otherwise, it will contain one Feature Set per player.
*/
protected BaseFeatureSet[] featureSets;
/** Temperature for distribution */
protected double temperature = 1.0;
/** Do we want to play greedily? */
protected boolean greedy = false;
//-------------------------------------------------------------------------
/**
* Default constructor. Initialises important parts to null, so the policy
* will break if used directly; call customise() first.
*/
public SoftmaxPolicyLogitTree()
{
regressionTreeRoots = null;
featureSets = null;
}
/**
* Constructs a softmax policy with regression tree(s) for logits
* @param regressionTreeRoots
* @param featureSets
*/
public SoftmaxPolicyLogitTree
(
final LogitTreeNode[] regressionTreeRoots,
final BaseFeatureSet[] featureSets
)
{
this.regressionTreeRoots = regressionTreeRoots;
this.featureSets = Arrays.copyOf(featureSets, featureSets.length);
}
/**
* Constructs a softmax policy with regression tree(s) for logits,
* and a limit on the number of play-out actions to run with this policy
* plus a fallback Play-out strategy to use afterwards.
*
* @param regressionTreeRoots
* @param featureSets
* @param playoutActionLimit
*/
public SoftmaxPolicyLogitTree
(
final LogitTreeNode[] regressionTreeRoots,
final BaseFeatureSet[] featureSets,
final int playoutActionLimit
)
{
this.regressionTreeRoots = regressionTreeRoots;
this.featureSets = Arrays.copyOf(featureSets, featureSets.length);
this.playoutActionLimit = playoutActionLimit;
}
/**
* Constructs a softmax policy from a given set of feature trees as created
* by the compiler, using the Selection weights.
*
* @param featureTrees
* @param epsilon Epsilon for epsilon-greedy playouts
*/
public static SoftmaxPolicyLogitTree constructPolicy(final FeatureTrees featureTrees, final double epsilon)
{
final SoftmaxPolicyLogitTree softmax = new SoftmaxPolicyLogitTree();
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<LogitTreeNode> roots = new ArrayList<LogitTreeNode>();
for (final LogitTree logitTree : featureTrees.logitTrees())
{
if (logitTree.role() == RoleType.Shared || logitTree.role() == RoleType.Neutral)
addFeatureSetRoot(0, logitTree.root(), featureSetsList, roots);
else
addFeatureSetRoot(logitTree.role().owner(), logitTree.root(), featureSetsList, roots);
}
softmax.featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
softmax.regressionTreeRoots = roots.toArray(new LogitTreeNode[roots.size()]);
softmax.epsilon = epsilon;
return softmax;
}
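// Illustrative usage sketch (featureTrees, game, context and playerID are
// assumed to exist elsewhere): building an epsilon-greedy playout policy from
// compiled feature-tree metadata:
//
//   final SoftmaxPolicyLogitTree policy = SoftmaxPolicyLogitTree.constructPolicy(featureTrees, 0.1);
//   policy.initAI(game, playerID);
//   final Move chosen = policy.selectAction(game, context, 1.0, -1, -1);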
//-------------------------------------------------------------------------
@Override
public FVector computeDistribution
(
final Context context,
final FastArrayList<Move> actions,
final boolean thresholded
)
{
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
featureSet = featureSets[0];
else
featureSet = featureSets[context.state().mover()];
return computeDistribution(featureSet.computeFeatureVectors(context, actions, thresholded), context.state().mover());
}
@Override
public float computeLogit(final Context context, final Move move)
{
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
featureSet = featureSets[0];
else
featureSet = featureSets[context.state().mover()];
final LogitTreeNode regressionTreeRoot;
if (regressionTreeRoots.length == 1)
regressionTreeRoot = regressionTreeRoots[0];
else
regressionTreeRoot = regressionTreeRoots[context.state().mover()];
return regressionTreeRoot.predict(featureSet.computeFeatureVector(context, move, true));
}
/**
* @param featureVectors
* @param player
* @return Probability distribution over actions implied by a list of sparse
* feature vectors
*/
public FVector computeDistribution
(
final FeatureVector[] featureVectors,
final int player
)
{
final float[] logits = new float[featureVectors.length];
final LogitTreeNode regressionTreeRoot;
if (regressionTreeRoots.length == 1)
regressionTreeRoot = regressionTreeRoots[0];
else
regressionTreeRoot = regressionTreeRoots[player];
for (int i = 0; i < featureVectors.length; ++i)
{
logits[i] = regressionTreeRoot.predict(featureVectors[i]);
}
final FVector distribution = FVector.wrap(logits);
distribution.softmax(temperature);
return distribution;
}
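// Note on the temperature used above (informal sketch, assuming FVector.softmax(temperature)
// implements the standard temperature softmax): logits z_i become probabilities
//
//   p_i = exp(z_i / T) / sum_j exp(z_j / T)
//
// so T = 1 reproduces a plain softmax, T < 1 sharpens the distribution towards
// the greedy move, and T > 1 flattens it towards uniform.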
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
final PlayoutMoveSelector playoutMoveSelector;
if (epsilon < 1.0)
{
if (epsilon <= 0.0)
playoutMoveSelector = new LogitTreeMoveSelector(featureSets, regressionTreeRoots, greedy, temperature);
else
playoutMoveSelector = new EpsilonGreedyWrapper(new LogitTreeMoveSelector(featureSets, regressionTreeRoots, greedy, temperature), epsilon);
}
else
{
playoutMoveSelector = null;
}
return context.game().playout
(
context,
null,
1.0,
playoutMoveSelector,
playoutActionLimit,
playoutTurnLimit,
ThreadLocalRandom.current()
);
}
@Override
public boolean playoutSupportsGame(final Game game)
{
return supportsGame(game);
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
String policyTreesFilepath = null;
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.toLowerCase().startsWith("policytrees="))
{
policyTreesFilepath = input.substring("policytrees=".length());
}
else if (input.toLowerCase().startsWith("playoutactionlimit="))
{
playoutActionLimit =
Integer.parseInt(input.substring(
"playoutactionlimit=".length()));
}
else if (input.toLowerCase().startsWith("playoutturnlimit="))
{
playoutTurnLimit =
Integer.parseInt
(
input.substring("playoutturnlimit=".length())
);
}
else if (input.toLowerCase().startsWith("friendly_name="))
{
friendlyName = input.substring("friendly_name=".length());
}
else if (input.toLowerCase().startsWith("epsilon="))
{
epsilon = Double.parseDouble(input.substring("epsilon=".length()));
}
else if (input.toLowerCase().startsWith("greedy="))
{
greedy = Boolean.parseBoolean(input.substring("greedy=".length()));
}
else if (input.toLowerCase().startsWith("temperature="))
{
temperature = Double.parseDouble(input.substring("temperature=".length()));
}
}
if (policyTreesFilepath != null)
{
final List<BaseFeatureSet> featureSetsList = new ArrayList<BaseFeatureSet>();
final List<LogitTreeNode> roots = new ArrayList<LogitTreeNode>();
try
{
final String featureTreesString = FileHandling.loadTextContentsFromFile(policyTreesFilepath);
final FeatureTrees featureTrees =
(FeatureTrees)compiler.Compiler.compileObject
(
featureTreesString,
"metadata.ai.features.trees.FeatureTrees",
new Report()
);
for (final LogitTree logitTree : featureTrees.logitTrees())
{
if (logitTree.role() == RoleType.Shared || logitTree.role() == RoleType.Neutral)
addFeatureSetRoot(0, logitTree.root(), featureSetsList, roots);
else
addFeatureSetRoot(logitTree.role().owner(), logitTree.root(), featureSetsList, roots);
}
this.featureSets = featureSetsList.toArray(new BaseFeatureSet[featureSetsList.size()]);
this.regressionTreeRoots = roots.toArray(new LogitTreeNode[roots.size()]);
}
catch (final IOException e)
{
e.printStackTrace();
}
}
else
{
System.err.println("Cannot construct Softmax Policy Logit Tree from: " + Arrays.toString(inputs));
}
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final Moves actions = game.moves(context);
final BaseFeatureSet featureSet;
if (featureSets.length == 1)
{
featureSet = featureSets[0];
}
else
{
featureSet = featureSets[context.state().mover()];
}
final FVector distribution =
computeDistribution
(
featureSet.computeFeatureVectors
(
context,
actions.moves(),
true
),
context.state().mover()
);
if (greedy)
return actions.moves().get(distribution.argMaxRand());
else
return actions.moves().get(distribution.sampleFromDistribution());
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
if (featureSets.length == 1)
{
final int[] supportedPlayers = new int[game.players().count()];
for (int i = 0; i < supportedPlayers.length; ++i)
{
supportedPlayers[i] = i + 1;
}
featureSets[0].init(game, supportedPlayers, null);
}
else
{
for (int i = 1; i < featureSets.length; ++i)
{
featureSets[i].init(game, new int[] {i}, null);
}
}
}
@Override
public void closeAI()
{
if (featureSets == null)
return;
if (featureSets.length == 1)
{
featureSets[0].closeCache();
}
else
{
for (int i = 1; i < featureSets.length; ++i)
{
featureSets[i].closeCache();
}
}
}
//-------------------------------------------------------------------------
/**
* @return Feature Sets used by this policy
*/
public BaseFeatureSet[] featureSets()
{
return featureSets;
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return A softmax logit tree policy constructed from a given array of input lines
*/
public static SoftmaxPolicyLogitTree fromLines(final String[] lines)
{
SoftmaxPolicyLogitTree policy = null;
// for (final String line : lines)
// {
// if (line.equalsIgnoreCase("features=from_metadata"))
// {
// policy = new SoftmaxFromMetadataSelection(0.0);
// break;
// }
// }
//
// if (policy == null)
policy = new SoftmaxPolicyLogitTree();
policy.customise(lines);
return policy;
}
//-------------------------------------------------------------------------
/**
* Helper method that adds a Feature Set and a logit tree root for the
* given player index
*
* @param playerIdx
* @param rootNode
* @param outFeatureSets
* @param outRoots
*/
protected static void addFeatureSetRoot
(
final int playerIdx,
final LogitNode rootNode,
final List<BaseFeatureSet> outFeatureSets,
final List<LogitTreeNode> outRoots
)
{
while (outFeatureSets.size() <= playerIdx)
{
outFeatureSets.add(null);
}
while (outRoots.size() <= playerIdx)
{
outRoots.add(null);
}
final List<AspatialFeature> aspatialFeatures = new ArrayList<AspatialFeature>();
final List<SpatialFeature> spatialFeatures = new ArrayList<SpatialFeature>();
final Set<String> featureStrings = new HashSet<String>();
rootNode.collectFeatureStrings(featureStrings);
for (final String featureString : featureStrings)
{
final Feature feature = Feature.fromString(featureString);
if (feature instanceof AspatialFeature)
aspatialFeatures.add((AspatialFeature)feature);
else
spatialFeatures.add((SpatialFeature)feature);
}
final BaseFeatureSet featureSet = JITSPatterNetFeatureSet.construct(aspatialFeatures, spatialFeatures);
outFeatureSets.set(playerIdx, featureSet);
outRoots.set(playerIdx, LogitTreeNode.fromMetadataNode(rootNode, featureSet));
}
//-------------------------------------------------------------------------
}
| 13,772 | 25.640232 | 142 | java |
Ludii | Ludii-master/AI/src/search/flat/FlatMonteCarlo.java | package search.flat;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.AI;
import other.RankUtils;
import other.context.Context;
import other.model.Model;
import other.move.Move;
import utils.AIUtils;
/**
* A simple Flat Monte-Carlo AI.
*
* @author Dennis Soemers
*/
public class FlatMonteCarlo extends AI
{
//-------------------------------------------------------------------------
/** Our player index */
protected int player = -1;
/** Sums of scores of the last search we ran */
protected int[] lastScoreSums = null;
/** Visit counts of the last search we ran */
protected int[] lastVisitCounts = null;
/** List of legal actions for which we ran last search */
protected FastArrayList<Move> lastActionList = null;
/** We'll automatically return our move after at most this number of seconds if we only have one move */
protected double autoPlaySeconds = 0.5;
//-------------------------------------------------------------------------
/**
* Constructor
*/
public FlatMonteCarlo()
{
friendlyName = "Flat MC";
}
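// Illustrative usage sketch (game, context and playerID are assumed to exist):
//
//   final FlatMonteCarlo ai = new FlatMonteCarlo();
//   ai.initAI(game, playerID);
//   final Move move = ai.selectAction(game, context, 1.0, -1, -1);   // ~1 second of flat Monte-Carlo
//
// Each iteration applies one uniformly random first move for this player,
// finishes the game with a random playout, and credits the resulting utility
// to that first move; the move with the best average utility is returned.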
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final long startTime = System.currentTimeMillis();
long stopTime = (maxSeconds > 0.0) ? startTime + (long) (maxSeconds * 1000) : Long.MAX_VALUE;
final int maxIts = (maxIterations >= 0) ? maxIterations : Integer.MAX_VALUE;
FastArrayList<Move> legalMoves = game.moves(context).moves();
//System.out.println("legal moves for all players = " + legalMoves);
if (!game.isAlternatingMoveGame())
legalMoves = AIUtils.extractMovesForMover(legalMoves, player);
//System.out.println("legal moves for player " + player + " = " + legalMoves);
final int numActions = legalMoves.size();
if (numActions == 1)
{
// play faster if we only have one move available anyway
if (autoPlaySeconds >= 0.0 && autoPlaySeconds < maxSeconds)
stopTime = startTime + (long) (autoPlaySeconds * 1000);
}
final int[] sumScores = new int[numActions];
final int[] numVisits = new int[numActions];
int numIterations = 0;
// Simulate until we have to stop
while (numIterations < maxIts && System.currentTimeMillis() < stopTime)
{
final Context copyContext = copyContext(context);
final Model model = copyContext.model();
model.startNewStep(copyContext, null, 1.0, -1, -1, 0.0, false, false, false);
final int firstAction = ThreadLocalRandom.current().nextInt(numActions);
model.applyHumanMove(copyContext, legalMoves.get(firstAction), player);
if (!model.isReady())
{
// this means we're in a simultaneous-move game, randomly select actions for opponents
model.randomStep(copyContext, null, null);
}
if (!copyContext.trial().over())
{
copyContext.game().playout
(
copyContext,
null,
1.0,
null,
0,
-1,
ThreadLocalRandom.current()
);
}
numVisits[firstAction] += 1;
final double[] utilities = RankUtils.utilities(copyContext);
sumScores[firstAction] += utilities[player];
++numIterations;
}
final List<Move> bestActions = new ArrayList<Move>();
double maxAvgScore = Double.NEGATIVE_INFINITY;
for (int i = 0; i < numActions; ++i)
{
final double avgScore = numVisits[i] == 0 ? -100.0 : (double) sumScores[i] / numVisits[i];
//System.out.println("avgScore for " + legalMoves.get(i) + " = " + avgScore);
if (avgScore > maxAvgScore)
{
maxAvgScore = avgScore;
bestActions.clear();
bestActions.add(legalMoves.get(i));
}
else if (avgScore == maxAvgScore)
{
bestActions.add(legalMoves.get(i));
}
}
lastScoreSums = sumScores;
lastVisitCounts = numVisits;
lastActionList = new FastArrayList<Move>(legalMoves);
//System.out.println("returning best action");
return bestActions.get(ThreadLocalRandom.current().nextInt(bestActions.size()));
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
this.player = playerID;
lastScoreSums = null;
lastVisitCounts = null;
lastActionList = null;
}
/**
* @return Sums of scores of last search
*/
public int[] lastScoreSums()
{
return lastScoreSums;
}
/**
* @return Visit counts of last search
*/
public int[] lastVisitCounts()
{
return lastVisitCounts;
}
/**
* @return List of legal actions of last search
*/
public FastArrayList<Move> lastActionList()
{
return lastActionList;
}
@Override
public boolean supportsGame(final Game game)
{
if (game.isDeductionPuzzle())
return false;
return true;
}
@Override
public AIVisualisationData aiVisualisationData()
{
if (lastActionList == null)
return null;
final FVector aiDistribution = new FVector(lastActionList.size());
final FVector valueEstimates = new FVector(lastActionList.size());
final FastArrayList<Move> moves = new FastArrayList<>();
for (int i = 0; i < lastActionList.size(); ++i)
{
aiDistribution.set(i, (float) ((double) lastScoreSums[i] / lastVisitCounts[i]));
valueEstimates.set(i, (float) ((double) lastScoreSums[i] / lastVisitCounts[i]));
moves.add(lastActionList.get(i));
}
return new AIVisualisationData(aiDistribution, valueEstimates, moves);
}
//-------------------------------------------------------------------------
}
| 5,896 | 25.443946 | 105 | java |
Ludii | Ludii-master/AI/src/search/flat/HeuristicSampleAdapted.java | package search.flat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import javax.swing.JTable;
import game.Game;
import main.collections.FVector;
import main.collections.FastArrayList;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.PlayerRegionsProximity;
import metadata.ai.heuristics.terms.RegionProximity;
import other.action.Action;
import other.context.Context;
import other.context.TempContext;
import other.move.Move;
import other.move.MoveScore;
import search.flat.HeuristicSampleAdaptedUtils.HeuristicProportionViewInterface;
/**
* Variant of HeuristicSampling that includes a visualisation of the heuristic
* weightings and waits out the thinking time (or a user action) before committing to a move
*
* @author Markus
*
*/
public class HeuristicSampleAdapted extends HeuristicSampling
{
@SuppressWarnings("unused")
private FastArrayList<Move> moves;
private FastArrayList<MoveScore> moveScores;
private boolean waitForUserAction;
private boolean autoSelect = false;
private boolean heuristicChanged;
private boolean randomizingTerm = false;
private MoveHeuristicEvaluation latestMoveHeuristicEvaluation;
private HeuristicProportionViewInterface heuristicProportionView;
private Move userSelectedMove = null;
@Override
public void setHeuristics(Heuristics heuristics) {
super.setHeuristics(heuristics);
heuristicChanged = true;
}
/**
* called when playout sampling is turned of or on
*/
public void recalculateHeuristics() {
heuristicChanged = true;
}
public void setAutoSelect(boolean autoSelect) {
this.autoSelect = autoSelect;
}
public HeuristicSampleAdapted(Heuristics heuristic) {
super(heuristic);
setHeuristics(heuristic);
}
public HeuristicSampleAdapted() {
super();
}
public HeuristicSampleAdapted(Heuristics heuristic, int fraction) {
super(heuristic, fraction);
setHeuristics(heuristic);
}
public HeuristicSampleAdapted(int fraction) {
super(fraction);
}
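// Illustrative sketch of the intended interaction (the GUI glue code is assumed
// to exist elsewhere):
//
//   final HeuristicSampleAdapted ai = new HeuristicSampleAdapted(myHeuristics);
//   ai.setAutoSelect(false);           // selectAction() now blocks until the user acts
//   ...                                // user inspects the heuristic breakdown in the view
//   ai.makeMove(moveClickedByUser);    // or ai.makeMove() to accept the AI's own choice
//
// Calling setHeuristics() while selectAction() is blocked marks the heuristics
// as changed, so all moves are re-evaluated before anything is returned.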
public MoveHeuristicEvaluation getLatestMoveHeuristicEvaluation() {
return latestMoveHeuristicEvaluation;
}
@Override
public Move selectAction(final Game game, final Context context, final double maxSeconds, final int maxIterations,
final int maxDepth) {
long startTime = System.currentTimeMillis();
boolean repeatCondition = false;
waitForUserAction = true;
userSelectedMove = null;
Move move = null;
do {
heuristicChanged = false;
final MoveScore moveScore = evaluateMoves(game, context, 1);
move = moveScore.move();
if (move == null)
System.out.println("** No best move.");
boolean waitingCondition = false;
do {
if (wantsInterrupt)
return null;
if (!(autoSelect && !heuristicChanged)) {
try {
Thread.sleep(16);
} catch (final InterruptedException e) {
e.printStackTrace();
}
}
long remainingTime = (long) (maxSeconds * 1000 - (System.currentTimeMillis() - startTime));
waitingCondition = (!heuristicChanged && !autoSelect && waitForUserAction)
|| (autoSelect && remainingTime > 0 && !heuristicChanged);
} while (waitingCondition);
long remainingTime = (long) (maxSeconds * 1000 - (System.currentTimeMillis() - startTime));
repeatCondition = (autoSelect && heuristicChanged && remainingTime > 0)
|| (!autoSelect && heuristicChanged && waitForUserAction);
} while (repeatCondition);
if (userSelectedMove != null) return userSelectedMove;
return move;
}
@Override
public AIVisualisationData aiVisualisationData() {
FVector aiDistribution = new FVector(moveScores.size());
FVector valueEstimates = new FVector(moveScores.size());
FastArrayList<Move> movesList = new FastArrayList<>(moveScores.size());
float minNegative = 0;
float maxNegative = Long.MIN_VALUE;
float minPositive = Long.MAX_VALUE;
float maxPositive = 0;
float deltaNegative = 0f;
float deltaPositive = 0f;
boolean noPositive = true;
boolean noNegative = true;
for (int j = 0; j < moveScores.size(); j++) {
MoveScore moveScore = moveScores.get(j);
aiDistribution.set(j, 1.f);
valueEstimates.set(j, moveScore.score());
movesList.add(moveScore.move());
float score = moveScore.score();
if (score >= 0) {
noPositive = false;
if (score < minPositive)
minPositive = score;
if (score > maxPositive)
maxPositive = score;
}
if (score < 0) {
noNegative = false;
if (score < minNegative)
minNegative = score;
if (score > maxNegative)
maxNegative = score;
}
}
deltaNegative = maxNegative - minNegative;
deltaPositive = maxPositive - minPositive;
if (noPositive) {
minPositive = 0f;
maxPositive = 0f;
deltaPositive = 0f;
}
if (noNegative) {
minNegative = 0f;
maxNegative = 0f;
deltaNegative = 0f;
}
for (int i = 0; i < valueEstimates.dim(); i++) {
float newVal = valueEstimates.get(i);
if (newVal == 0)
continue;
if (newVal < 0 && deltaNegative != 0) {
newVal = 0.f - ((maxNegative - newVal) / deltaNegative);
} else if (newVal < 0 && deltaNegative == 0) {
newVal = -1;
}
if (newVal > 0 && deltaPositive != 0) {
newVal = 0.f + ((newVal - minPositive) / deltaPositive);
} else if (newVal > 0 && deltaPositive == 0) {
newVal = 1;
}
valueEstimates.set(i, newVal);
}
// FVector f = new FVector(valueEstimates);
ArrayList<Entry<Float, Integer>> entries = new ArrayList<Entry<Float, Integer>>();
for (int i = 0; i < valueEstimates.dim(); i++) {
int indexLoop = i;
entries.add(new Entry<Float, Integer>() {
Integer index = Integer.valueOf(indexLoop);
@Override
public Float getKey() {
return Float.valueOf(valueEstimates.get(indexLoop));
}
@Override
public Integer getValue() {
return index;
}
@Override
public Integer setValue(Integer value) {
index = value;
return index;
}
});
}
Collections.sort(entries, new Comparator<Entry<Float, Integer>>() {
@Override
public int compare(Entry<Float, Integer> o1, Entry<Float, Integer> o2) {
return -o1.getKey().compareTo(o2.getKey());
}
});
if (entries.isEmpty()) return null;
Entry<Float, Integer> array_element = entries.get(0);
float newVal = ((entries.size() - 0) * 1.f) / (entries.size()) * 1.0f + 0.0f;
aiDistribution.set(array_element.getValue().intValue(), newVal);
Entry<Float, Integer> lastEntry = array_element;
for (int i = 1; i < entries.size(); i++) {
Entry<Float, Integer> entry = entries.get(i);
if (lastEntry.getKey().equals(entry.getKey())) {
aiDistribution.set(entry.getValue().intValue(), aiDistribution.get(lastEntry.getValue().intValue()));
} else {
float newValLoop = ((entries.size() - i) * 1.f) / (entries.size()) * 1.0f + 0.0f;
aiDistribution.set(entry.getValue().intValue(), newValLoop);
}
lastEntry = entry;
}
/*
* for (int i = 1; i < entries.size(); i++) { Entry<Float, Integer>
* array_element = entries.get(i);
* aiDistribution.set(array_element.getValue().intValue() if
* (aiDistribution.get(i-1) == aiDistribution.get(i)) { aiDistribution.set(i,
* aiDistribution.get(i-1)); }
*
* }
*/
return new AIVisualisationData(aiDistribution, valueEstimates, movesList);
}
public static class MoveHeuristicEvaluation {
private FastArrayList<Move> moves;
private Context context;
private Heuristics heuristicFunction;
private int mover;
private int[] opponents;
private Game game;
public final static String[] floatNames = new String[] { "finalWeighted", "finalWeightless", "score1Weighted",
"score1Weightless", "scoreOpponentWeighted", "scoreOpponentWeightLess" };
public MoveHeuristicEvaluation(Game game, FastArrayList<Move> moves, Context context,
Heuristics heuristicFunction, int mover, int[] opponents) {
this.moves = addNullMoveAndSort(moves);
this.context = context;
this.heuristicFunction = heuristicFunction;
this.mover = mover;
this.opponents = opponents;
this.game = game;
}
private static FastArrayList<Move> addNullMoveAndSort(FastArrayList<Move> moves2) {
List<Action> a = new ArrayList<Action>();
Move m = new Move(a);
moves2.add(0,m);
ArrayList<Move> sorted = new ArrayList<>();
for (Move move : moves2) {
sorted.add(move);
}
sorted.sort(Comparator.comparing(Move::toString));
FastArrayList<Move> h = new FastArrayList<>();
for (Move move : sorted) {
h.add(move);
}
return h;
}
public Heuristics getHeuristicFunction() {
return heuristicFunction;
}
public HashMap<Move, HashMap<HeuristicTerm, Float[]>> getHashMap() {
HashMap<Move, HashMap<HeuristicTerm, Float[]>> finalMap = new HashMap<>();
for (Move move : moves) {
HashMap<HeuristicTerm, Float[]> termsToValueMap = calculateMove(move);
finalMap.put(move, termsToValueMap);
}
return finalMap;
}
private HashMap<HeuristicTerm, Float[]> calculateMove(Move move) {
final Context contextCopy = new TempContext(context);
game.apply(contextCopy, move);
HashMap<HeuristicTerm, Float[]> termsToValueMap = getHeuristicTermsToValueMap(contextCopy);
return termsToValueMap;
}
private HashMap<HeuristicTerm,Float[]> getHeuristicTermsToValueMap( final Context contextCopy) {
HashMap<HeuristicTerm, Float[]> termsToValueMap = new HashMap<>();
for (HeuristicTerm ht : heuristicFunction.heuristicTerms()) {
float score1 = ht.computeValue(contextCopy, mover, ABS_HEURISTIC_WEIGHT_THRESHOLD);
float score2 = 0;
for (final int opp : opponents) {
if (contextCopy.active(opp))
score2 -= ht.computeValue(contextCopy, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
else if (contextCopy.winners().contains(opp))
score2 -= PARANOID_OPP_WIN_SCORE;
}
float scoreCombined = score1 + score2;
Float[] scores = new Float[] { Float.valueOf(ht.weight() * scoreCombined),
Float.valueOf(score1 + score2), Float.valueOf(ht.weight() * score1), Float.valueOf(score1),
Float.valueOf(ht.weight() * score2), Float.valueOf(score2) };
termsToValueMap.put(ht, scores);
}
return termsToValueMap;
}
public JTable getJTable(String valueType) {
for (int i = 0; i < floatNames.length; i++) {
String string = floatNames[i];
if (string.equals(valueType))
return getJTable(i);
}
return null;
}
public JTable getJTable(int valueType) {
HashMap<Move, HashMap<HeuristicTerm, Float[]>> hashMap = this.getHashMap();
/*
* StringBuilder sb = new StringBuilder(); sb.append("Move: " );
* Set<HeuristicTerm> terms =
* hashMap.entrySet().iterator().next().getValue().keySet(); for (HeuristicTerm
* heuristicTerm : terms) { sb.append(heuristicTerm.getClass().getSimpleName() +
* " "); for (int i = 1; i < floatNames.length; i++) { sb.append("\t"); } }
* sb.append("\n"); sb.append(" "); for (HeuristicTerm heuristicTerm :
* terms) { sb.append(heuristicTerm.getClass().getSimpleName() + " "); for (int
* i = 1; i < floatNames.length; i++) { sb.append("\t"); } }
*/
ArrayList<String> columnNames = new ArrayList<>();
Set<HeuristicTerm> terms = hashMap.entrySet().iterator().next().getValue().keySet();
HashMap<String, HeuristicTerm> nameToHT = new HashMap<>();
for (HeuristicTerm heuristicTerm : terms) {
String className = heuristicTerm.getClass().getSimpleName();
if (heuristicTerm instanceof RegionProximity)
className += " " + ((RegionProximity) heuristicTerm).region();
if (heuristicTerm instanceof PlayerRegionsProximity)
className += " " + ((PlayerRegionsProximity) heuristicTerm).regionPlayer();
columnNames.add(className);
nameToHT.put(columnNames.get(columnNames.size() - 1), heuristicTerm);
}
Collections.sort(columnNames);
columnNames.add(0, "move: ");
columnNames.add(1, "totalWeighted: ");
columnNames.add(2, "QuickHeuristic: ");
columnNames.add(3, "PlayoutScore: ");
Move move = moves.get(0);
HashMap<HeuristicTerm, Float[]> hm = hashMap.get(move);
Object[][] data = new Object[moves.size()][hm.size() * floatNames.length + 1];
for (int i = 0; i < moves.size(); i++) {
Move m = moves.get(i);
hm = hashMap.get(m);
data[i][0] = m;
float sum = 0;
int counter = 4;
for (int j = 4; j < columnNames.size(); j++) {
String name = columnNames.get(j);
HeuristicTerm heuristicTerm = nameToHT.get(name);
Float[] floats = hm.get(heuristicTerm);
sum += floats[0].floatValue();
data[i][counter++] = String.format("%+.2f", floats[valueType]) + "";
}
data[i][1] = Float.valueOf(sum);
}
String[] columNamesArray = columnNames.toArray(new String[columnNames.size()]);
JTable table = new JTable(data, columNamesArray);
return table;
}
public int getMover() {
return mover;
}
public void recalcMove(int selectedRow) {
//HashMap<HeuristicTerm, Float[]> termsToValueMap = calculateMove(this.moves.get(selectedRow));
}
public Move getMove(int selectedIndex) {
return moves.get(selectedIndex);
}
}
public MoveHeuristicEvaluation getMoveHeuristicEvaluation() {
return latestMoveHeuristicEvaluation;
}
@Override
MoveScore evaluateMoves(final Game game, final Context context, final int depth) {
if (randomizingTerm) {
evaluateMoves(game, context, depth, false);
}
return evaluateMoves(game, context, depth, randomizingTerm);
}
private MoveScore evaluateMoves(Game game, Context context, int depth, boolean useRandomisingTerm) {
FastArrayList<Move> movesLocal;
FastArrayList<MoveScore> moveScoresLocal;
movesLocal = selectMoves(game, context, threshold(), depth);
moveScoresLocal = new FastArrayList<MoveScore>();
if (!useRandomisingTerm) {
this.moves = movesLocal;
this.moveScores = moveScoresLocal;
}
float bestScore = Float.NEGATIVE_INFINITY;
Move bestMove = movesLocal.get(0);
final int mover = context.state().mover();
// Context contextCurrent = context;
for (final Move move : movesLocal) {
final Context contextCopy = new TempContext(context);
game.apply(contextCopy, move);
if (!contextCopy.active(mover)) {
if (contextCopy.winners().contains(mover)) {
moveScoresLocal.add(new MoveScore(move, WIN_SCORE));
if (WIN_SCORE > bestScore) {
bestScore = WIN_SCORE;
bestMove = move;
}
//return new MoveScore(move, WIN_SCORE); // Return winning move immediately
continue;
}
else if (contextCopy.losers().contains(mover)) {
moveScoresLocal.add(new MoveScore(move, -WIN_SCORE));
continue; // Skip losing move
}
}
float score = 0;
if (continuation() && contextCopy.state().mover() == mover && depth <= 10) {
// System.out.println("Recursing...");
return new MoveScore(move, evaluateMoves(game, contextCopy, depth + 1,useRandomisingTerm).score());
} else {
score = super.heuristicFunction.computeValue(contextCopy, mover, ABS_HEURISTIC_WEIGHT_THRESHOLD);
for (final int opp : opponents(mover)) {
if (contextCopy.active(opp))
score -= super.heuristicFunction.computeValue(contextCopy, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
else if (contextCopy.winners().contains(opp))
score -= PARANOID_OPP_WIN_SCORE;
}
if (useRandomisingTerm)
score += (float) (ThreadLocalRandom.current().nextInt(1000) / 1000000.0);
}
moveScoresLocal.add(new MoveScore(move, score));
if (score > bestScore) {
bestScore = score;
bestMove = move;
}
}
if (!useRandomisingTerm) {
latestMoveHeuristicEvaluation = new MoveHeuristicEvaluation(game, movesLocal, context, heuristicFunction, mover,
opponents(mover));
if (heuristicProportionView != null) {
heuristicProportionView.update(latestMoveHeuristicEvaluation, game, context);
}
}
return new MoveScore(bestMove, bestScore);
}
public void makeMove() {
this.waitForUserAction = false;
}
public void makeMove(Move m) {
this.userSelectedMove = m;
this.waitForUserAction = false;
}
public void useRandomTerm(boolean useRandomTerm) {
this.randomizingTerm = useRandomTerm;
}
public void setHeuristicProportionView(HeuristicProportionViewInterface heuristicProportionView) {
this.heuristicProportionView = heuristicProportionView;
this.heuristicProportionView.addObserver(this);
}
}
| 16,458 | 29.42329 | 115 | java |
Ludii | Ludii-master/AI/src/search/flat/HeuristicSampling.java | package search.flat;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.FileHandling;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilitySimple;
import other.AI;
import other.context.Context;
import other.context.TempContext;
import other.move.Move;
import other.move.MoveScore;
/**
* Flat search that does heuristic sampling per turn, i.e. chooses T random moves
* and selects the one with the highest heuristic evaluation when applied.
*
* @author cambolbro and Dennis Soemers
*/
public class HeuristicSampling extends AI
{
//-------------------------------------------------------------------------
/** Score we give to winning opponents in paranoid searches in states where game is still going (> 2 players) */
protected static final float PARANOID_OPP_WIN_SCORE = 10000.f;
protected static final float WIN_SCORE = 10000.f;
/** We skip computing heuristics with absolute weight value lower than this */
public static final float ABS_HEURISTIC_WEIGHT_THRESHOLD = 0.01f;
/** Our heuristic value function estimator */
private Heuristics heuristicValueFunction = null;
/** If true, we read our heuristic function to use from game's metadata */
private final boolean heuristicsFromMetadata;
/** The number of players in the game we're currently playing */
protected int numPlayersInGame = 0;
/** Denominator of heuristic threshold fraction, i.e. 1/2, 1/4, 1/8, etc. */
private int fraction = 2; // FIXME why isn't this just a multiplier in (0.0, 1.0]?
/** Whether to apply same-turn continuation. */
private boolean continuation = true;
//-------------------------------------------------------------------------
/**
* Constructor
*/
public HeuristicSampling()
{
heuristicsFromMetadata = true;
setFriendlyName();
}
/**
* Constructor
*/
public HeuristicSampling(final int fraction)
{
heuristicsFromMetadata = true;
this.fraction = fraction;
setFriendlyName();
}
/**
* Constructor
*/
public HeuristicSampling(final Heuristics heuristics)
{
heuristicValueFunction = heuristics;
heuristicsFromMetadata = false;
setFriendlyName();
}
/**
* Constructor
*/
public HeuristicSampling(final Heuristics heuristics, final int fraction)
{
heuristicValueFunction = heuristics;
heuristicsFromMetadata = false;
this.fraction = fraction;
setFriendlyName();
}
/**
* Constructor
* @param heuristicsFilepath
*/
public HeuristicSampling(final String heuristicsFilepath) throws FileNotFoundException, IOException
{
final String heuristicsStr = FileHandling.loadTextContentsFromFile(heuristicsFilepath);
heuristicValueFunction = (Heuristics)compiler.Compiler.compileObject
(
heuristicsStr,
"metadata.ai.heuristics.Heuristics",
new Report()
);
heuristicsFromMetadata = false;
setFriendlyName();
}
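// Illustrative sketch (the heuristics file path is hypothetical): different ways
// of constructing an agent that samples roughly 1/4 of the legal moves:
//
//   final HeuristicSampling fromMetadata = new HeuristicSampling(4);               // heuristics read from game metadata in initAI()
//   final HeuristicSampling explicit = new HeuristicSampling(myHeuristics, 4);
//   final HeuristicSampling fromDisk = new HeuristicSampling("/path/to/heuristics.txt");   // default fraction of 2, throws IOException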
//-------------------------------------------------------------------------
// Getters and Setters
public Heuristics heuristics()
{
return heuristicValueFunction;
}
public int threshold()
{
return fraction;
}
public void setThreshold(final int value)
{
fraction = value;
setFriendlyName();
}
public boolean continuation()
{
return continuation;
}
public void setContinuation(final boolean value)
{
continuation = value;
setFriendlyName();
}
//-------------------------------------------------------------------------
void setFriendlyName()
{
friendlyName = "HS (1/" + fraction + ")" + (continuation ? "*" : "");
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final MoveScore moveScore = evaluateMoves(game, context, 1);
final Move move = moveScore.move();
if (move == null)
System.out.println("** No best move.");
return move;
}
//-------------------------------------------------------------------------
MoveScore evaluateMoves(final Game game, final Context context, final int depth)
{
final FastArrayList<Move> moves = selectMoves(game, context, fraction, depth);
float bestScore = Float.NEGATIVE_INFINITY;
Move bestMove = moves.get(0);
final int mover = context.state().mover();
//Context contextCurrent = context;
for (final Move move : moves)
{
final Context contextCopy = new TempContext(context);
game.apply(contextCopy, move);
if (!contextCopy.active(mover))
{
if (contextCopy.winners().contains(mover))
return new MoveScore(move, WIN_SCORE); // Return winning move immediately
else if (contextCopy.losers().contains(mover))
continue; // Skip losing move
}
float score = 0;
if (!contextCopy.active(mover))
{
score = 0.f; // Must be a draw in this case
}
else if (continuation && contextCopy.state().mover() == mover && depth <= 10)
{
score = evaluateMoves(game, contextCopy, depth + 1).score();
}
else
{
score = heuristicValueFunction.computeValue
(
contextCopy, mover, ABS_HEURISTIC_WEIGHT_THRESHOLD
);
for (final int opp : opponents(mover))
{
if (contextCopy.active(opp))
score -= heuristicValueFunction.computeValue(contextCopy, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
else if (contextCopy.winners().contains(opp))
score -= PARANOID_OPP_WIN_SCORE;
}
score += (float)(ThreadLocalRandom.current().nextInt(1000) / 1000000.0);
}
if (score > bestScore)
{
bestScore = score;
bestMove = move;
}
}
return new MoveScore(bestMove, bestScore);
}
//-------------------------------------------------------------------------
/**
* @param game Current game.
* @param context Current context.
* @param fraction Denominator of the fraction of legal moves to sample (e.g. 2 samples roughly half).
* @param depth Current depth in our little search
* @return Randomly chosen subset of moves.
*/
public static FastArrayList<Move> selectMoves(final Game game, final Context context, final int fraction, final int depth)
{
final FastArrayList<Move> playerMoves = new FastArrayList<Move>(game.moves(context).moves());
final FastArrayList<Move> selectedMoves = new FastArrayList<Move>();
// Some special stuff here to ensure we don't get stack overflow
double scalar = 1.0 / fraction;
final int minMoves = (depth < 3) ? 2 : 1;
if (depth >= 3)
scalar = 1.0 / (fraction * Math.pow(2.0, depth - 2));
final int target = Math.max(minMoves, (int) ((playerMoves.size() + 1) * scalar));
//System.out.println("selecting " + target + " out of " + playerMoves.size() + " moves at depth " + depth + " --- " + context.trial().over());
if (target >= playerMoves.size())
return playerMoves;
while (selectedMoves.size() < target)
{
final int r = ThreadLocalRandom.current().nextInt(playerMoves.size());
selectedMoves.add(playerMoves.get(r));
playerMoves.removeSwap(r);
}
return selectedMoves;
}
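// Worked sketch of the target size above: with fraction = 2 and 20 legal moves
// at depth 1, scalar = 0.5 and target = max(2, (int) (21 * 0.5)) = 10 sampled
// moves; at depth 4 the scalar halves twice more (1 / (2 * 2^2) = 0.125), so
// only max(1, (int) (21 * 0.125)) = 2 moves survive, which keeps the same-turn
// continuation recursion from blowing up.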
//-------------------------------------------------------------------------
/**
* @param player
* @return Opponents of given player
*/
public int[] opponents(final int player)
{
final int[] opponents = new int[numPlayersInGame - 1];
int idx = 0;
for (int p = 1; p <= numPlayersInGame; ++p)
{
if (p != player)
opponents[idx++] = p;
}
return opponents;
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
if (heuristicsFromMetadata)
{
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicValueFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicValueFunction = new Heuristics(new HeuristicTerm[]{
new Material(null, Float.valueOf(1.f), null, null),
new MobilitySimple(null, Float.valueOf(0.001f))
});
}
}
if (heuristicValueFunction != null)
heuristicValueFunction.init(game);
numPlayersInGame = game.players().count();
}
@Override
public boolean supportsGame(final Game game)
{
if (game.players().count() <= 1)
return false;
// if (game.isStochasticGame())
// return false;
if (game.hiddenInformation())
return false;
return game.isAlternatingMoveGame();
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return Constructs a Heuristic Sampling object from instructions in the
* given array of lines
*/
public static HeuristicSampling fromLines(final String[] lines)
{
String friendlyName = "HeuristicSampling";
String heuristicsFilepath = null;
for (final String line : lines)
{
final String[] lineParts = line.split(",");
if (lineParts[0].toLowerCase().startsWith("heuristics="))
{
heuristicsFilepath = lineParts[0].substring("heuristics=".length());
}
else if (lineParts[0].toLowerCase().startsWith("friendly_name="))
{
friendlyName = lineParts[0].substring("friendly_name=".length());
}
}
HeuristicSampling heuristicSampling = null;
if (heuristicsFilepath != null)
{
try
{
heuristicSampling = new HeuristicSampling(heuristicsFilepath);
}
catch (final IOException e)
{
e.printStackTrace();
}
}
if (heuristicSampling == null)
heuristicSampling = new HeuristicSampling();
heuristicSampling.friendlyName = friendlyName;
return heuristicSampling;
}
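// Example input for fromLines() (added; values are hypothetical, format inferred from
// the parsing above):
//
//   heuristics=/path/to/heuristics.txt
//   friendly_name=HS (custom heuristics)
//
// Only the part of each line before the first comma is inspected here.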
//-------------------------------------------------------------------------
}
| 10,010 | 25.206806 | 144 | java |
Ludii | Ludii-master/AI/src/search/flat/OnePlyNoHeuristic.java | package search.flat;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.collections.FastArrayList;
import other.AI;
import other.RankUtils;
import other.context.Context;
import other.move.Move;
/**
* One-ply search with no heuristics (only optimises for best ranking achievable
* in a single move, with random tie-breaking). For stochastic games, only randomly
* considers one outcome for every move.
*
* @author Dennis Soemers
*/
public class OnePlyNoHeuristic extends AI
{
//-------------------------------------------------------------------------
/** The number of players in the game we're currently playing */
protected int numPlayersInGame = 0;
//-------------------------------------------------------------------------
/**
* Constructor
*/
public OnePlyNoHeuristic()
{
this.friendlyName = "One-Ply (No Heuristic)";
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final FastArrayList<Move> legalMoves = game.moves(context).moves();
final int agent = context.state().playerToAgent(context.state().mover());
double bestScore = Double.NEGATIVE_INFINITY;
final List<Move> bestMoves = new ArrayList<Move>();
final double utilLowerBound = RankUtils.rankToUtil(context.computeNextLossRank(), numPlayersInGame);
final double utilUpperBound = RankUtils.rankToUtil(context.computeNextWinRank(), numPlayersInGame);
for (final Move move : legalMoves)
{
game.apply(context, move);
final int player = context.state().currentPlayerOrder(agent);
final double score;
if (context.active(player))
{
// Still active, so just assume average between lower and upper bound
score = (utilLowerBound + utilUpperBound) / 2.0;
}
else
{
// Not active, so take actual utility
score = RankUtils.rankToUtil(context.trial().ranking()[player], numPlayersInGame);
}
if (score > bestScore)
bestMoves.clear();
if (score >= bestScore)
{
bestMoves.add(move);
bestScore = score;
}
game.undo(context);
}
return bestMoves.get(ThreadLocalRandom.current().nextInt(bestMoves.size()));
}
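// Illustrative example (added; not from the original source): ranks are mapped to
// utilities in [-1, 1], with the best achievable rank at +1 and the worst at -1.
// In a 2-player game, a move that immediately wins therefore scores +1, a move that
// immediately loses scores -1, and a move after which we are still active scores the
// midpoint of what remains achievable (0.0 while both a win and a loss are possible),
// with ties broken uniformly at random.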
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
numPlayersInGame = game.players().count();
}
@Override
public boolean supportsGame(final Game game)
{
if (game.players().count() <= 1)
return false;
// if (game.isStochasticGame())
// return false;
if (game.hiddenInformation())
return false;
return game.isAlternatingMoveGame();
}
//-------------------------------------------------------------------------
}
| 2,927 | 23.813559 | 102 | java |
Ludii | Ludii-master/AI/src/search/flat/HeuristicSampleAdaptedUtils/HeuristicProportionViewInterface.java | package search.flat.HeuristicSampleAdaptedUtils;
import game.Game;
import other.context.Context;
import search.flat.HeuristicSampleAdapted;
import search.flat.HeuristicSampleAdapted.MoveHeuristicEvaluation;
/**
* Used to allow classes of the "distance" module to access the heuristic proportion view.
* @author Markus
*
*/
public interface HeuristicProportionViewInterface {
void addObserver(HeuristicSampleAdapted heuristicSampleAdapted);
void update(MoveHeuristicEvaluation latestMoveHeuristicEvaluation, Game game, Context context);
}
| 516 | 22.5 | 96 | java |
Ludii | Ludii-master/AI/src/search/mcts/MCTS.java | package search.mcts;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.json.JSONObject;
import game.Game;
import game.types.state.GameType;
import main.DaemonThreadFactory;
import main.collections.FVector;
import main.collections.FastArrayList;
import main.math.statistics.IncrementalStats;
import metadata.ai.features.Features;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilitySimple;
import other.AI;
import other.RankUtils;
import other.context.Context;
import other.move.Move;
import other.state.State;
import other.trial.Trial;
import policies.Policy;
import policies.softmax.SoftmaxFromMetadataPlayout;
import policies.softmax.SoftmaxFromMetadataSelection;
import policies.softmax.SoftmaxPolicyLinear;
import policies.softmax.SoftmaxPolicyLogitTree;
import search.mcts.backpropagation.AlphaGoBackprop;
import search.mcts.backpropagation.BackpropagationStrategy;
import search.mcts.backpropagation.HeuristicBackprop;
import search.mcts.backpropagation.MonteCarloBackprop;
import search.mcts.backpropagation.QualitativeBonus;
import search.mcts.finalmoveselection.FinalMoveSelectionStrategy;
import search.mcts.finalmoveselection.MaxAvgScore;
import search.mcts.finalmoveselection.ProportionalExpVisitCount;
import search.mcts.finalmoveselection.RobustChild;
import search.mcts.nodes.BaseNode;
import search.mcts.nodes.OpenLoopNode;
import search.mcts.nodes.ScoreBoundsNode;
import search.mcts.nodes.StandardNode;
import search.mcts.playout.HeuristicSampingPlayout;
import search.mcts.playout.PlayoutStrategy;
import search.mcts.playout.RandomPlayout;
import search.mcts.selection.AG0Selection;
import search.mcts.selection.NoisyAG0Selection;
import search.mcts.selection.ProgressiveBias;
import search.mcts.selection.ProgressiveHistory;
import search.mcts.selection.SelectionStrategy;
import search.mcts.selection.UCB1;
import search.mcts.selection.UCB1GRAVE;
import search.mcts.selection.UCB1Tuned;
import training.expert_iteration.ExItExperience;
import training.expert_iteration.ExpertPolicy;
import utils.AIUtils;
/**
* A modular implementation of Monte-Carlo Tree Search (MCTS) for playing games
* in Ludii.
*
* @author Dennis Soemers
*/
public class MCTS extends ExpertPolicy
{
//-------------------------------------------------------------------------
/**
* Different strategies for initializing Q(s, a) values (or V(s) values of
* nodes)
*
* @author Dennis Soemers
*/
public static enum QInit
{
/**
* Give unvisited nodes a very large value
* (actually 10,000 rather than infinity)
*/
INF,
/**
* Estimate the value of unvisited nodes as a loss (-1). This is a
* highly pessimistic value for unvisited nodes, and causes us to rely
* much more on prior distribution. Word on the street is that DeepMind
* does this in Alpha(Go) Zero.
*/
LOSS,
/**
* Estimate the value of unvisited nodes as a draw (0.0). This causes
* us to prioritise empirical wins over unvisited nodes.
*/
DRAW,
/**
* Estimate the value of unvisited nodes as a win (1). Very similar to
* INF, just a bit less extreme.
*/
WIN,
/**
* Estimate the value of unvisited nodes as the value estimate of the
* parent (with corrections for mover).
*/
PARENT,
}
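// Illustrative note (added; not from the original source): the chosen strategy is
// applied whenever a node's child has not been visited yet. With LOSS an unvisited
// child is treated as having value -1, so early selection is driven almost entirely
// by the prior policy; with INF (value 10,000, as noted above) every child is forced
// to be tried before exploitation kicks in. A typical way to change it:
//
//   final MCTS mcts = MCTS.createUCT();
//   mcts.setQInit(QInit.LOSS);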
//-------------------------------------------------------------------------
// Flags for things we want to do when expanding a node
/** Compute a heuristic-based value estimate for expanded nodes */
public final static int HEURISTIC_INIT = 0x1;
//-------------------------------------------------------------------------
// Basic members of MCTS
/** Root node of the last search process */
protected volatile BaseNode rootNode = null;
/** Implementation of Selection phase */
protected SelectionStrategy selectionStrategy;
/** Implementation of Play-out phase */
protected PlayoutStrategy playoutStrategy;
/** Implementation of Backpropagation of results through the tree */
protected BackpropagationStrategy backpropagationStrategy;
/** Algorithm to select move to play in the "real" game after searching */
protected FinalMoveSelectionStrategy finalMoveSelectionStrategy;
/** Strategy for init of Q-values for unvisited nodes. */
protected QInit qInit = QInit.PARENT;
/** Flags indicating what data needs to be backpropagated */
protected int backpropFlags = 0;
/** Flags indicating things we want to do when expanding a node */
protected int expansionFlags = 0;
/** We'll automatically return our move after at most this number of seconds if we only have one move */
protected double autoPlaySeconds = 0.0; // TODO allow customisation
/** Our thread pool for tree parallelisation */
private ExecutorService threadPool = null;
/** Number of threads this MCTS should use for parallel iterations */
private int numThreads = 1;
/** Lets us track whether all threads in our thread pool have completely finished */
private AtomicInteger numThreadsBusy = new AtomicInteger(0);
//-------------------------------------------------------------------------
/** State flags of the game we're currently playing */
protected long currentGameFlags = 0;
/**
* We'll memorise the number of iterations we have executed in our last
* search here
*/
protected int lastNumMctsIterations = -1;
/**
* We'll memorise the number of actions we have executed in play-outs
* during our last search here
*/
protected int lastNumPlayoutActions = -1;
/**
* Value estimate of the last move we returned
*/
protected double lastReturnedMoveValueEst = 0.0;
/** String to print to Analysis tab of the Ludii app */
protected String analysisReport = null;
/**
* If true, we preserve our root node after running search. Will increase memory usage,
* but allows us to use it to access data afterwards (for instance for training algorithms)
*/
protected boolean preserveRootNode = false;
//-------------------------------------------------------------------------
// Following members are related to and/or required because of Tree Reuse
/**
* Whether or not to reuse trees generated in previous
* searches in the same game
*/
protected boolean treeReuse = true;
/**
* Need to memorise this such that we know which parts of the tree to
* traverse to before starting Tree Reuse
*/
protected int lastActionHistorySize = 0;
/** Decay factor for global action statistics when reusing trees */
protected final double globalActionDecayFactor = 0.6;
//-------------------------------------------------------------------------
/** A learned policy to use in Selection phase */
protected Policy learnedSelectionPolicy = null;
/** Do we want to load heuristics from metadata on init? */
protected boolean wantsMetadataHeuristics = false;
/** Do we want to track pessimistic and optimistic score bounds in nodes, for solving? */
protected boolean useScoreBounds = false;
/**
* If we have heuristic value estimates in nodes, we assign this weight to playout outcomes,
* and 1 minus this weight to the value estimate of node before playout.
*
* TODO can move this into the AlphaGoBackprop class I think
*
* 1.0 --> normal MCTS
* 0.5 --> AlphaGo
* 0.0 --> AlphaGo Zero
*/
protected double playoutValueWeight = 1.0;
//-------------------------------------------------------------------------
/** Table of global (MCTS-wide) action stats (e.g., for Progressive History) */
protected final Map<MoveKey, ActionStatistics> globalActionStats;
/** Table of global (MCTS-wide) N-gram action stats (e.g., for NST) */
protected final Map<NGramMoveKey, ActionStatistics> globalNGramActionStats;
/** Max length of N-grams of actions we consider */
protected final int maxNGramLength;
/** For every player, a global MCTS-wide tracker of statistics on heuristics */
protected IncrementalStats[] heuristicStats = null;
//-------------------------------------------------------------------------
/**
* Global flag telling us whether we want MCTS objects to null (clear) undo
* data in Trial objects stored in their nodes. True by default, since
* usually we want to do this to reduce memory usage.
*
* Sometimes in self-play training this causes issues though, and there
* we typically don't worry about the memory usage anyway since we tend
* to have rather short and shallow searches, so we can set this to false.
*/
public static boolean NULL_UNDO_DATA = true;
//-------------------------------------------------------------------------
/**
* Creates standard UCT algorithm, with exploration constant = sqrt(2.0)
* @return UCT agent
*/
public static MCTS createUCT()
{
return createUCT(Math.sqrt(2.0));
}
/**
* Creates standard UCT algorithm with parameter for
* UCB1's exploration constant
*
* @param explorationConstant
* @return UCT agent
*/
public static MCTS createUCT(final double explorationConstant)
{
final MCTS uct =
new MCTS
(
new UCB1(explorationConstant),
new RandomPlayout(200),
new MonteCarloBackprop(),
new RobustChild()
);
uct.friendlyName = "UCT";
return uct;
}
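/**
 * Minimal usage sketch (added for illustration; not part of the original API).
 * Runs a roughly one-second UCT search and returns the chosen move. The game,
 * context and playerId arguments are assumed to be supplied by the caller.
 */
private static Move exampleUctSearch(final Game game, final Context context, final int playerId)
{
final MCTS uct = createUCT();
uct.initAI(game, playerId);
// Negative iteration/depth budgets are treated as "no limit" by selectAction()
return uct.selectAction(game, context, 1.0, -1, -1);
}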
/**
* Creates a Biased MCTS agent which attempts to use features and
* weights embedded in a game's metadata file.
* @param epsilon Epsilon for epsilon-greedy feature-based playouts. 1 for uniform, 0 for always softmax
* @return Biased MCTS agent
*/
public static MCTS createBiasedMCTS(final double epsilon)
{
final MCTS mcts =
new MCTS
(
new NoisyAG0Selection(),
epsilon < 1.0 ? new SoftmaxFromMetadataPlayout(epsilon) : new RandomPlayout(200),
new MonteCarloBackprop(),
new RobustChild()
);
mcts.setQInit(QInit.WIN);
mcts.setLearnedSelectionPolicy(new SoftmaxFromMetadataSelection(epsilon));
mcts.friendlyName = epsilon < 1.0 ? "Biased MCTS" : "Biased MCTS (Uniform Playouts)";
return mcts;
}
/**
* Creates a Biased MCTS agent using given collection of features
*
* @param features
* @param epsilon Epsilon for epsilon-greedy feature-based playouts. 1 for uniform, 0 for always softmax
* @return Biased MCTS agent
*/
public static MCTS createBiasedMCTS(final Features features, final double epsilon)
{
final MCTS mcts =
new MCTS
(
new NoisyAG0Selection(),
epsilon < 1.0 ? SoftmaxPolicyLinear.constructPlayoutPolicy(features, epsilon) : new RandomPlayout(200),
new MonteCarloBackprop(),
new RobustChild()
);
mcts.setQInit(QInit.WIN);
mcts.setLearnedSelectionPolicy(SoftmaxPolicyLinear.constructSelectionPolicy(features, epsilon));
mcts.friendlyName = epsilon < 1.0 ? "Biased MCTS" : "Biased MCTS (Uniform Playouts)";
return mcts;
}
/**
* Creates a Hybrid MCTS agent which attempts to use heuristics in a game's metadata file.
* @return Hybrid MCTS agent
*/
public static MCTS createHybridMCTS()
{
final MCTS mcts =
new MCTS
(
new UCB1(Math.sqrt(2.0)),
new HeuristicSampingPlayout(),
new AlphaGoBackprop(),
new RobustChild()
);
mcts.setWantsMetadataHeuristics(true);
mcts.setPlayoutValueWeight(0.5);
mcts.friendlyName = "MCTS (Hybrid Selection)";
return mcts;
}
/**
* Creates a Bandit Tree Search that uses heuristics to guide the search, with no playouts.
* @return Bandit Tree Search agent
*/
public static MCTS createBanditTreeSearch()
{
final MCTS mcts =
new MCTS
(
new UCB1(Math.sqrt(2.0)),
new RandomPlayout(0),
new AlphaGoBackprop(),
new RobustChild()
);
mcts.setWantsMetadataHeuristics(true);
mcts.setPlayoutValueWeight(0.0);
mcts.friendlyName = "Bandit Tree Search (Avg)";
return mcts;
}
/**
* Creates a Policy-Value Tree Search agent, using features for policy and heuristics
* for value function.
*
* @param features
* @param heuristics
* @return Policy-Value Tree Search agent
*/
public static MCTS createPVTS(final Features features, final Heuristics heuristics)
{
final MCTS mcts =
new MCTS
(
new NoisyAG0Selection(),
new RandomPlayout(0),
new AlphaGoBackprop(),
new RobustChild()
);
mcts.setLearnedSelectionPolicy(SoftmaxPolicyLinear.constructSelectionPolicy(features, 0.0));
mcts.setPlayoutValueWeight(0.0);
mcts.setWantsMetadataHeuristics(false);
mcts.setHeuristics(heuristics);
mcts.friendlyName = "PVTS";
return mcts;
}
//-------------------------------------------------------------------------
/**
* Constructor with arguments for all strategies
* @param selectionStrategy
* @param playoutStrategy
* @param finalMoveSelectionStrategy
*/
public MCTS
(
final SelectionStrategy selectionStrategy,
final PlayoutStrategy playoutStrategy,
final BackpropagationStrategy backpropagationStrategy,
final FinalMoveSelectionStrategy finalMoveSelectionStrategy
)
{
this.selectionStrategy = selectionStrategy;
this.playoutStrategy = playoutStrategy;
this.backpropagationStrategy = backpropagationStrategy;
backpropFlags = selectionStrategy.backpropFlags() | playoutStrategy.backpropFlags();
expansionFlags = selectionStrategy.expansionFlags();
this.backpropagationStrategy.setBackpropFlags(backpropFlags);
backpropFlags = backpropFlags | this.backpropagationStrategy.backpropagationFlags();
this.finalMoveSelectionStrategy = finalMoveSelectionStrategy;
if ((backpropFlags & BackpropagationStrategy.GLOBAL_ACTION_STATS) != 0)
globalActionStats = new ConcurrentHashMap<MoveKey, ActionStatistics>();
else
globalActionStats = null;
if ((backpropFlags & BackpropagationStrategy.GLOBAL_NGRAM_ACTION_STATS) != 0)
{
globalNGramActionStats = new ConcurrentHashMap<NGramMoveKey, ActionStatistics>();
maxNGramLength = 3; // Hardcoded to 3 for now, should make it a param...
}
else
{
globalNGramActionStats = null;
maxNGramLength = 0;
}
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final long startTime = System.currentTimeMillis();
long stopTime = (maxSeconds > 0.0) ? startTime + (long) (maxSeconds * 1000) : Long.MAX_VALUE;
final int maxIts = (maxIterations >= 0) ? maxIterations : Integer.MAX_VALUE;
while (numThreadsBusy.get() != 0 && System.currentTimeMillis() < Math.min(stopTime, startTime + 1000L))
{
// Give threads in thread pool some more time to clean up after themselves from previous iteration
}
// We'll assume all threads are really done now and just reset to 0
numThreadsBusy.set(0);
final AtomicInteger numIterations = new AtomicInteger();
// Find or create root node
if (treeReuse && rootNode != null)
{
// Want to reuse part of existing search tree
// Need to traverse parts of old tree corresponding to
// actions played in the real game
final List<Move> actionHistory = context.trial().generateCompleteMovesList();
int offsetActionToTraverse = actionHistory.size() - lastActionHistorySize;
if (offsetActionToTraverse < 0)
{
// Something strange happened, probably forgot to call
// initAI() for a newly-started game. Won't be a good
// idea to reuse tree anyway
rootNode = null;
}
while (offsetActionToTraverse > 0)
{
final Move move = actionHistory.get(actionHistory.size() - offsetActionToTraverse);
rootNode = rootNode.findChildForMove(move);
if (rootNode == null)
{
// Didn't have a node in tree corresponding to action
// played, so can't reuse tree
break;
}
--offsetActionToTraverse;
}
}
if (rootNode == null || !treeReuse)
{
// Need to create a fresh root
rootNode = createNode(this, null, null, null, context);
//System.out.println("NO TREE REUSE");
}
else
{
//System.out.println("successful tree reuse");
// We're reusing a part of previous search tree
// Clean up unused parts of search tree from memory
rootNode.setParent(null);
// TODO in nondeterministic games + OpenLoop MCTS, we'll want to
// decay statistics gathered in the entire subtree here
}
if (globalActionStats != null)
{
// Decay global action statistics
final Set<Entry<MoveKey, ActionStatistics>> entries = globalActionStats.entrySet();
final Iterator<Entry<MoveKey, ActionStatistics>> it = entries.iterator();
while (it.hasNext())
{
final Entry<MoveKey, ActionStatistics> entry = it.next();
final ActionStatistics stats = entry.getValue();
stats.visitCount *= globalActionDecayFactor;
if (stats.visitCount < 1.0)
it.remove();
else
stats.accumulatedScore *= globalActionDecayFactor;
}
}
if (globalNGramActionStats != null)
{
// Decay global N-gram action statistics
final Set<Entry<NGramMoveKey, ActionStatistics>> entries = globalNGramActionStats.entrySet();
final Iterator<Entry<NGramMoveKey, ActionStatistics>> it = entries.iterator();
while (it.hasNext())
{
final Entry<NGramMoveKey, ActionStatistics> entry = it.next();
final ActionStatistics stats = entry.getValue();
stats.visitCount *= globalActionDecayFactor;
if (stats.visitCount < 1.0)
it.remove();
else
stats.accumulatedScore *= globalActionDecayFactor;
}
}
if (heuristicStats != null)
{
// Clear all heuristic stats
for (int p = 1; p < heuristicStats.length; ++p)
{
heuristicStats[p].init(0, 0.0, 0.0);
}
}
rootNode.rootInit(context);
if (rootNode.numLegalMoves() == 1)
{
// play faster if we only have one move available anyway
if (autoPlaySeconds >= 0.0 && autoPlaySeconds < maxSeconds)
stopTime = startTime + (long) (autoPlaySeconds * 1000);
}
lastActionHistorySize = context.trial().numMoves();
lastNumPlayoutActions = 0; // TODO if this variable actually becomes important, may want to make it Atomic
// Store this in a separate variable because threading weirdness sometimes sets the class variable to null
// even though some threads here still want to do something with it.
final BaseNode rootThisCall = rootNode;
// For each thread, queue up a job
final CountDownLatch latch = new CountDownLatch(numThreads);
final long finalStopTime = stopTime; // Need this to be final for use in inner lambda
for (int thread = 0; thread < numThreads; ++thread)
{
threadPool.submit
(
() ->
{
try
{
numThreadsBusy.incrementAndGet();
// Search until we have to stop
while (numIterations.get() < maxIts && System.currentTimeMillis() < finalStopTime && !wantsInterrupt)
{
/*********************
Selection Phase
*********************/
BaseNode current = rootThisCall;
current.addVirtualVisit();
current.startNewIteration(context);
Context playoutContext = null;
while (current.contextRef().trial().status() == null)
{
BaseNode prevNode = current;
prevNode.getLock().lock();
try
{
final int selectedIdx = selectionStrategy.select(this, current);
BaseNode nextNode = current.childForNthLegalMove(selectedIdx);
final Context newContext = current.traverse(selectedIdx);
if (nextNode == null)
{
/*********************
Expand
*********************/
nextNode =
createNode
(
this,
current,
newContext.trial().lastMove(),
current.nthLegalMove(selectedIdx),
newContext
);
current.addChild(nextNode, selectedIdx);
current = nextNode;
current.addVirtualVisit();
current.updateContextRef();
if ((expansionFlags & HEURISTIC_INIT) != 0)
{
assert (heuristicFunction != null);
nextNode.setHeuristicValueEstimates
(
AIUtils.heuristicValueEstimates(nextNode.playoutContext(), heuristicFunction)
);
}
playoutContext = current.playoutContext();
break; // stop Selection phase
}
current = nextNode;
current.addVirtualVisit();
current.updateContextRef();
}
catch (final ArrayIndexOutOfBoundsException e)
{
System.err.println(describeMCTS());
throw e;
}
finally
{
prevNode.getLock().unlock();
}
}
Trial endTrial = current.contextRef().trial();
int numPlayoutActions = 0;
if (!endTrial.over() && playoutValueWeight > 0.0)
{
// Did not reach a terminal game state yet
/********************************
Play-out
********************************/
final int numActionsBeforePlayout = current.contextRef().trial().numMoves();
endTrial = playoutStrategy.runPlayout(this, playoutContext);
numPlayoutActions = (endTrial.numMoves() - numActionsBeforePlayout);
lastNumPlayoutActions +=
(playoutContext.trial().numMoves() - numActionsBeforePlayout);
}
else
{
// Reached a terminal game state
playoutContext = current.contextRef();
}
/***************************
Backpropagation Phase
***************************/
final double[] outcome = RankUtils.agentUtilities(playoutContext);
backpropagationStrategy.update(this, current, playoutContext, outcome, numPlayoutActions);
numIterations.incrementAndGet();
}
rootThisCall.cleanThreadLocals();
}
catch (final Exception e)
{
System.err.println("MCTS error in game: " + context.game().name());
e.printStackTrace(); // Need to do this here since we don't retrieve runnable's Future result
}
finally
{
numThreadsBusy.decrementAndGet();
latch.countDown();
}
}
);
}
try
{
// Guard against overflow when there is no time limit (stopTime == Long.MAX_VALUE)
final long awaitMillis = (stopTime == Long.MAX_VALUE) ? Long.MAX_VALUE : (stopTime - startTime + 2000L);
latch.await(awaitMillis, TimeUnit.MILLISECONDS);
}
catch (final InterruptedException e)
{
e.printStackTrace();
}
lastNumMctsIterations = numIterations.get();
final Move returnMove = finalMoveSelectionStrategy.selectMove(this, rootThisCall);
int playedChildIdx = -1;
if (!wantsInterrupt)
{
int moveVisits = -1;
for (int i = 0; i < rootThisCall.numLegalMoves(); ++i)
{
final BaseNode child = rootThisCall.childForNthLegalMove(i);
if (child != null)
{
if (rootThisCall.nthLegalMove(i).equals(returnMove))
{
final State state = rootThisCall.deterministicContextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
moveVisits = child.numVisits();
lastReturnedMoveValueEst = child.expectedScore(moverAgent);
playedChildIdx = i;
break;
}
}
}
final int numRootIts = rootThisCall.numVisits();
analysisReport =
friendlyName +
" made move after " +
numRootIts +
" iterations (selected child visits = " +
moveVisits +
", value = " +
lastReturnedMoveValueEst +
").";
}
else
{
analysisReport = null;
}
// We can already try to clean up a bit of memory here
// NOTE: from this point on we have to use rootNode instead of rootThisCall again!
if (!preserveRootNode)
{
if (!treeReuse)
{
rootNode = null; // clean up entire search tree
}
else if (!wantsInterrupt) // only clean up if we didn't pause the AI / interrupt it
{
if (playedChildIdx >= 0)
rootNode = rootThisCall.childForNthLegalMove(playedChildIdx);
else
rootNode = null;
if (rootNode != null)
{
rootNode.setParent(null);
++lastActionHistorySize;
}
}
}
//System.out.println(numIterations + " MCTS iterations");
return returnMove;
}
/**
* @param mcts
* @param parent
* @param parentMove
* @param parentMoveWithoutConseq
* @param context
* @return New node
*/
protected BaseNode createNode
(
final MCTS mcts,
final BaseNode parent,
final Move parentMove,
final Move parentMoveWithoutConseq,
final Context context
)
{
if ((currentGameFlags & GameType.Stochastic) == 0L || wantsCheatRNG())
{
if (useScoreBounds)
return new ScoreBoundsNode(mcts, parent, parentMove, parentMoveWithoutConseq, context);
else
return new StandardNode(mcts, parent, parentMove, parentMoveWithoutConseq, context);
}
else
{
return new OpenLoopNode(mcts, parent, parentMove, parentMoveWithoutConseq, context.game());
}
}
//-------------------------------------------------------------------------
/**
* Sets number of seconds after which we auto-play if we only have one legal move.
* @param seconds
*/
public void setAutoPlaySeconds(final double seconds)
{
autoPlaySeconds = seconds;
}
/**
* Set whether or not to reuse tree from previous search processes
* @param treeReuse
*/
public void setTreeReuse(final boolean treeReuse)
{
this.treeReuse = treeReuse;
}
/**
* Set the number of threads to use for Tree Parallelisation
* @param numThreads
*/
public void setNumThreads(final int numThreads)
{
this.numThreads = numThreads;
}
//-------------------------------------------------------------------------
/**
* @return Flags indicating what data we need to backpropagate
*/
public int backpropFlags()
{
return backpropFlags;
}
/**
* @return Learned (linear or tree) policy for Selection phase
*/
public Policy learnedSelectionPolicy()
{
return learnedSelectionPolicy;
}
/**
* @return Max length of N-grams of actions for which we collect statistics
*/
public int maxNGramLength()
{
return maxNGramLength;
}
/**
* @return Heuristics used by MCTS
*/
public Heuristics heuristics()
{
return heuristicFunction;
}
/**
* @return Play-out strategy used by this MCTS object
*/
public PlayoutStrategy playoutStrategy()
{
return playoutStrategy;
}
/**
* @return Init strategy for Q-values of unvisited nodes
*/
public QInit qInit()
{
return qInit;
}
/**
* @return Current root node
*/
public BaseNode rootNode()
{
return rootNode;
}
/**
* Sets the learned policy to use in Selection phase
* @param policy The policy.
*/
public void setLearnedSelectionPolicy(final Policy policy)
{
learnedSelectionPolicy = policy;
}
/**
* Sets whether we want to load heuristics from metadata
* @param val The value.
*/
public void setWantsMetadataHeuristics(final boolean val)
{
wantsMetadataHeuristics = val;
}
/**
* Sets whether we want to use pessimistic and optimistic score bounds for solving nodes
* @param val
*/
public void setUseScoreBounds(final boolean val)
{
useScoreBounds = val;
}
/**
* Sets the Q-init strategy
* @param init
*/
public void setQInit(final QInit init)
{
qInit = init;
}
/**
* Sets whether we want to preserve root node after running search
* @param preserveRootNode
*/
public void setPreserveRootNode(final boolean preserveRootNode)
{
this.preserveRootNode = preserveRootNode;
}
/**
* Sets the weight to use for playout value estimates
* @param playoutValueWeight
*/
public void setPlayoutValueWeight(final double playoutValueWeight)
{
if (playoutValueWeight < 0.0)
{
this.playoutValueWeight = 0.0;
System.err.println("MCTS playoutValueWeight cannot be lower than 0.0!");
}
else if (playoutValueWeight > 1.0)
{
this.playoutValueWeight = 1.0;
System.err.println("MCTS playoutValueWeight cannot be greater than 1.0!");
}
else
{
this.playoutValueWeight = playoutValueWeight;
}
if (this.playoutValueWeight < 1.0) // We'll need heuristic values in nodes
expansionFlags = expansionFlags | HEURISTIC_INIT;
}
/**
* If we have heuristic value estimates in nodes, we assign this weight to playout outcomes,
* and 1 minus this weight to the value estimate of node before playout.
*
* 1.0 --> normal MCTS
* 0.5 --> AlphaGo
* 0.0 --> AlphaGo Zero
*
* @return The weight
*/
public double playoutValueWeight() // TODO probably this should become a property of AlphaGoBackprop
{
return playoutValueWeight;
}
/**
* @return Array of incremental stat trackers for heuristics (one per player)
*/
public IncrementalStats[] heuristicStats()
{
return heuristicStats;
}
//-------------------------------------------------------------------------
/**
* @return Number of MCTS iterations performed during our last search
*/
public int getNumMctsIterations()
{
return lastNumMctsIterations;
}
/**
* @return Number of actions executed in play-outs during our last search
*/
public int getNumPlayoutActions()
{
return lastNumPlayoutActions;
}
//-------------------------------------------------------------------------
@Override
public boolean usesFeatures(final Game game)
{
return (learnedSelectionPolicy != null || playoutStrategy instanceof SoftmaxPolicyLinear);
}
@Override
public void initAI(final Game game, final int playerID)
{
// Store state flags
currentGameFlags = game.gameFlags();
// Reset counters
lastNumMctsIterations = -1;
lastNumPlayoutActions = -1;
// Reset tree reuse stuff
rootNode = null;
lastActionHistorySize = 0;
// Instantiate feature sets for selection policy
if (learnedSelectionPolicy != null)
{
learnedSelectionPolicy.initAI(game, playerID);
}
// May also have to instantiate feature sets for Playout policy if it doubles as an AI
if (playoutStrategy instanceof AI)
{
if (playoutStrategy != learnedSelectionPolicy)
{
final AI aiPlayout = (AI) playoutStrategy;
aiPlayout.initAI(game, playerID);
}
}
// Init heuristics
if (wantsMetadataHeuristics)
{
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicFunction =
new Heuristics
(
new HeuristicTerm[]
{
new Material(null, Float.valueOf(1.f), null, null),
new MobilitySimple(null, Float.valueOf(0.001f))
}
);
}
}
if (heuristicFunction != null)
heuristicFunction.init(game);
// Reset visualisation stuff
lastReturnedMoveValueEst = 0.0;
analysisReport = null;
// Completely clear any global action statistics
if (globalActionStats != null)
globalActionStats.clear();
if (globalNGramActionStats != null)
globalNGramActionStats.clear();
if ((backpropFlags & BackpropagationStrategy.GLOBAL_HEURISTIC_STATS) != 0)
{
heuristicStats = new IncrementalStats[game.players().count() + 1];
for (int p = 1; p < heuristicStats.length; ++p)
{
heuristicStats[p] = new IncrementalStats();
}
}
else
{
heuristicStats = null;
}
if (threadPool != null)
threadPool.shutdownNow();
threadPool = Executors.newFixedThreadPool(numThreads, DaemonThreadFactory.INSTANCE);
}
@Override
public void closeAI()
{
// This may help to clean up some memory
rootNode = null;
// Close trained selection policy
if (learnedSelectionPolicy != null)
{
learnedSelectionPolicy.closeAI();
}
// May also have to close Playout policy if it doubles as an AI
if (playoutStrategy instanceof AI)
{
if (playoutStrategy != learnedSelectionPolicy)
{
final AI aiPlayout = (AI) playoutStrategy;
aiPlayout.closeAI();
}
}
if (threadPool != null)
{
threadPool.shutdownNow();
try
{
threadPool.awaitTermination(200L, TimeUnit.MILLISECONDS);
}
catch (final InterruptedException e)
{
e.printStackTrace();
}
threadPool = null;
}
}
@Override
public boolean supportsGame(final Game game)
{
final long gameFlags = game.gameFlags();
// this MCTS implementation does not support simultaneous-move games
if ((gameFlags & GameType.Simultaneous) != 0L)
return false;
if (learnedSelectionPolicy != null && !learnedSelectionPolicy.supportsGame(game))
return false;
return playoutStrategy.playoutSupportsGame(game);
}
@Override
public double estimateValue()
{
return lastReturnedMoveValueEst;
}
@Override
public String generateAnalysisReport()
{
return analysisReport;
}
@Override
public AIVisualisationData aiVisualisationData()
{
if (rootNode == null)
return null;
if (rootNode.numVisits() == 0)
return null;
if (rootNode.deterministicContextRef() == null)
return null;
final int numChildren = rootNode.numLegalMoves();
final FVector aiDistribution = new FVector(numChildren);
final FVector valueEstimates = new FVector(numChildren);
final FastArrayList<Move> moves = new FastArrayList<>();
final State state = rootNode.deterministicContextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = rootNode.childForNthLegalMove(i);
if (child == null)
{
aiDistribution.set(i, 0);
if (rootNode.numVisits() == 0)
valueEstimates.set(i, 0.f);
else
valueEstimates.set(i, (float) rootNode.valueEstimateUnvisitedChildren(moverAgent));
}
else
{
aiDistribution.set(i, child.numVisits());
valueEstimates.set(i, (float) child.expectedScore(moverAgent));
}
if (valueEstimates.get(i) > 1.f)
valueEstimates.set(i, 1.f);
else if (valueEstimates.get(i) < -1.f)
valueEstimates.set(i, -1.f);
moves.add(rootNode.nthLegalMove(i));
}
return new AIVisualisationData(aiDistribution, valueEstimates, moves);
}
//-------------------------------------------------------------------------
/**
* @param moveKey
* @return global MCTS-wide action statistics for given move key
*/
public ActionStatistics getOrCreateActionStatsEntry(final MoveKey moveKey)
{
ActionStatistics stats = globalActionStats.get(moveKey);
if (stats == null)
{
stats = new ActionStatistics();
globalActionStats.put(moveKey, stats);
//System.out.println("creating entry for " + moveKey + " in " + this);
}
return stats;
}
/**
* @param nGramMoveKey
* @return global MCTS-wide N-gram action statistics for given N-gram move key,
* or null if it doesn't exist yet
*/
public ActionStatistics getNGramActionStatsEntry(final NGramMoveKey nGramMoveKey)
{
return globalNGramActionStats.get(nGramMoveKey);
}
/**
* @param nGramMoveKey
* @return global MCTS-wide N-gram action statistics for given N-gram move key
*/
public ActionStatistics getOrCreateNGramActionStatsEntry(final NGramMoveKey nGramMoveKey)
{
ActionStatistics stats = globalNGramActionStats.get(nGramMoveKey);
if (stats == null)
{
stats = new ActionStatistics();
globalNGramActionStats.put(nGramMoveKey, stats);
//System.out.println("creating entry for " + nGramMoveKey + " in " + this);
}
return stats;
}
//-------------------------------------------------------------------------
/**
* @param json
* @return MCTS agent constructed from given JSON object
*/
public static MCTS fromJson(final JSONObject json)
{
final SelectionStrategy selection =
SelectionStrategy.fromJson(json.getJSONObject("selection"));
final PlayoutStrategy playout =
PlayoutStrategy.fromJson(json.getJSONObject("playout"));
final BackpropagationStrategy backprop =
BackpropagationStrategy.fromJson(json.getJSONObject("backpropagation"));
final FinalMoveSelectionStrategy finalMove =
FinalMoveSelectionStrategy.fromJson(json.getJSONObject("final_move"));
final MCTS mcts = new MCTS(selection, playout, backprop, finalMove);
if (json.has("tree_reuse"))
{
mcts.setTreeReuse(json.getBoolean("tree_reuse"));
}
if (json.has("friendly_name"))
{
mcts.friendlyName = json.getString("friendly_name");
}
return mcts;
}
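// Example JSON for fromJson() (added; values are hypothetical). The four nested
// objects are required; "tree_reuse" and "friendly_name" are optional. Apart from
// "MonteCarlo", which BackpropagationStrategy.fromJson() recognises, the nested
// "strategy" names below are only guesses at what the other factories accept:
//
//   {
//     "selection": { "strategy": "UCB1" },
//     "playout": { "strategy": "Random" },
//     "backpropagation": { "strategy": "MonteCarlo" },
//     "final_move": { "strategy": "RobustChild" },
//     "tree_reuse": true,
//     "friendly_name": "My MCTS"
//   }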
//-------------------------------------------------------------------------
@Override
public FastArrayList<Move> lastSearchRootMoves()
{
return rootNode.movesFromNode();
}
@Override
public FVector computeExpertPolicy(final double tau)
{
return rootNode.computeVisitCountPolicy(tau);
}
@Override
public List<ExItExperience> generateExItExperiences()
{
return rootNode.generateExItExperiences();
}
//-------------------------------------------------------------------------
/**
* @param lines Instruction lines of the form "key=value" (see the example after this method).
* @return MCTS object constructed from the instructions in the given array of lines.
*/
public static MCTS fromLines(final String[] lines)
{
// Defaults - main parts
SelectionStrategy selection = new UCB1();
PlayoutStrategy playout = new RandomPlayout(200);
BackpropagationStrategy backprop = new MonteCarloBackprop();
FinalMoveSelectionStrategy finalMove = new RobustChild();
// Defaults - some extras
boolean treeReuse = false;
boolean useScoreBounds = false;
int numThreads = 1;
Policy learnedSelectionPolicy = null;
Heuristics heuristics = null;
QInit qinit = QInit.PARENT;
String friendlyName = "MCTS";
double playoutValueWeight = 1.0;
for (String line : lines)
{
final String[] lineParts = line.split(",");
//-----------------------------------------------------------------
// Main parts
//-----------------------------------------------------------------
if (lineParts[0].toLowerCase().startsWith("selection="))
{
if (lineParts[0].toLowerCase().endsWith("ucb1"))
{
selection = new UCB1();
selection.customise(lineParts);
}
else if
(
lineParts[0].toLowerCase().endsWith("ag0selection") ||
lineParts[0].toLowerCase().endsWith("alphago0selection")
)
{
selection = new AG0Selection();
selection.customise(lineParts);
}
else if
(
lineParts[0].toLowerCase().endsWith("noisyag0selection") ||
lineParts[0].toLowerCase().endsWith("noisyalphago0selection")
)
{
selection = new NoisyAG0Selection();
selection.customise(lineParts);
}
else if (lineParts[0].toLowerCase().endsWith("progressivebias"))
{
selection = new ProgressiveBias();
selection.customise(lineParts);
}
else if (lineParts[0].toLowerCase().endsWith("progressivehistory"))
{
selection = new ProgressiveHistory();
selection.customise(lineParts);
}
else if (lineParts[0].toLowerCase().endsWith("ucb1grave"))
{
selection = new UCB1GRAVE();
selection.customise(lineParts);
}
else if (lineParts[0].toLowerCase().endsWith("ucb1tuned"))
{
selection = new UCB1Tuned();
selection.customise(lineParts);
}
else
{
System.err.println("Unknown selection strategy: " + line);
}
}
else if (lineParts[0].toLowerCase().startsWith("playout="))
{
playout = PlayoutStrategy.constructPlayoutStrategy(lineParts);
}
else if (lineParts[0].toLowerCase().startsWith("backprop="))
{
if (lineParts[0].toLowerCase().endsWith("alphago"))
{
backprop = new AlphaGoBackprop();
}
else if (lineParts[0].toLowerCase().endsWith("heuristic"))
{
backprop = new HeuristicBackprop();
}
else if (lineParts[0].toLowerCase().endsWith("montecarlo"))
{
backprop = new MonteCarloBackprop();
}
else if (lineParts[0].toLowerCase().endsWith("qualitativebonus"))
{
backprop = new QualitativeBonus();
}
}
else if (lineParts[0].toLowerCase().startsWith("final_move="))
{
if (lineParts[0].toLowerCase().endsWith("maxavgscore"))
{
finalMove = new MaxAvgScore();
finalMove.customise(lineParts);
}
else if (lineParts[0].toLowerCase().endsWith("robustchild"))
{
finalMove = new RobustChild();
finalMove.customise(lineParts);
}
else if
(
lineParts[0].toLowerCase().endsWith("proportional") ||
lineParts[0].toLowerCase().endsWith("proportionalexpvisitcount")
)
{
finalMove = new ProportionalExpVisitCount(1.0);
finalMove.customise(lineParts);
}
else
{
System.err.println("Unknown final move selection strategy: " + line);
}
}
//-----------------------------------------------------------------
// Extras
//-----------------------------------------------------------------
else if (lineParts[0].toLowerCase().startsWith("tree_reuse="))
{
if (lineParts[0].toLowerCase().endsWith("true"))
{
treeReuse = true;
}
else if (lineParts[0].toLowerCase().endsWith("false"))
{
treeReuse = false;
}
else
{
System.err.println("Error in line: " + line);
}
}
else if (lineParts[0].toLowerCase().startsWith("use_score_bounds="))
{
if (lineParts[0].toLowerCase().endsWith("true"))
{
useScoreBounds = true;
}
else if (lineParts[0].toLowerCase().endsWith("false"))
{
useScoreBounds = false;
}
else
{
System.err.println("Error in line: " + line);
}
}
else if (lineParts[0].toLowerCase().startsWith("num_threads="))
{
numThreads = Integer.parseInt(lineParts[0].substring("num_threads=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("learned_selection_policy="))
{
if (lineParts[0].toLowerCase().endsWith("playout"))
{
// our playout strategy is our learned Selection policy
learnedSelectionPolicy = (Policy) playout;
}
else if
(
lineParts[0].toLowerCase().endsWith("softmax")
||
lineParts[0].toLowerCase().endsWith("softmaxplayout")
||
lineParts[0].toLowerCase().endsWith("softmaxlinear")
)
{
learnedSelectionPolicy = new SoftmaxPolicyLinear();
learnedSelectionPolicy.customise(lineParts);
}
else if (lineParts[0].toLowerCase().endsWith("softmaxlogittree"))
{
learnedSelectionPolicy = new SoftmaxPolicyLogitTree();
learnedSelectionPolicy.customise(lineParts);
}
}
else if (lineParts[0].toLowerCase().startsWith("heuristics="))
{
heuristics = Heuristics.fromLines(lineParts);
}
else if (lineParts[0].toLowerCase().startsWith("qinit="))
{
qinit = QInit.valueOf(lineParts[0].substring("qinit=".length()).toUpperCase());
}
else if (lineParts[0].toLowerCase().startsWith("playout_value_weight="))
{
playoutValueWeight = Double.parseDouble(lineParts[0].substring("playout_value_weight=".length()));
}
else if (lineParts[0].toLowerCase().startsWith("friendly_name="))
{
friendlyName = lineParts[0].substring("friendly_name=".length());
}
}
MCTS mcts = new MCTS(selection, playout, backprop, finalMove);
mcts.setTreeReuse(treeReuse);
mcts.setUseScoreBounds(useScoreBounds);
mcts.setNumThreads(numThreads);
mcts.setLearnedSelectionPolicy(learnedSelectionPolicy);
mcts.setHeuristics(heuristics);
mcts.setQInit(qinit);
mcts.setPlayoutValueWeight(playoutValueWeight);
mcts.friendlyName = friendlyName;
return mcts;
}
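// Example input for fromLines() (added; values are hypothetical, but the "key=" prefixes
// are exactly the ones parsed above):
//
//   selection=ucb1
//   playout=random
//   backprop=montecarlo
//   final_move=robustchild
//   tree_reuse=true
//   num_threads=4
//   qinit=win
//   playout_value_weight=0.5
//   friendly_name=My MCTS
//
// Extra comma-separated parts on a line are forwarded to the matching strategy
// (e.g., via customise() for the selection and final-move strategies).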
//-------------------------------------------------------------------------
/**
* @return A string describing our MCTS configuration
*/
public String describeMCTS()
{
final StringBuilder sb = new StringBuilder();
sb.append("Selection = " + selectionStrategy + "\n");
sb.append("Playout = " + playoutStrategy + "\n");
sb.append("Backprop = " + backpropagationStrategy + "\n");
sb.append("friendly name = " + friendlyName + "\n");
sb.append("tree reuse = " + treeReuse + "\n");
sb.append("use score bounds = " + useScoreBounds + "\n");
sb.append("qinit = " + qInit + "\n");
sb.append("playout value weight = " + playoutValueWeight + "\n");
sb.append("final move selection = " + finalMoveSelectionStrategy + "\n");
sb.append("heuristics:\n");
sb.append(heuristicFunction + "\n");
return sb.toString();
}
//-------------------------------------------------------------------------
/**
* Wrapper class for global (MCTS-wide) action statistics
* (accumulated scores + visit count)
*
* @author Dennis Soemers
*/
public static class ActionStatistics
{
/** Visit count (not int because we want to be able to decay) */
public double visitCount = 0.0;
/** Accumulated score */
public double accumulatedScore = 0.0;
@Override
public String toString()
{
return "[visits = " + visitCount + ", accum. score = " + accumulatedScore + "]";
}
}
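// Illustrative note (added; not from the original source): visit counts are doubles so
// that selectAction() can decay them between searches. With the decay factor of 0.6
// used there, an action visited 10 times decays to 6.0, then 3.6, then 2.16, and the
// entry is removed from the table as soon as its count drops below 1.0.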
/**
* Object to be used as key for a move in hash tables.
*
* @author Dennis Soemers
*/
public static class MoveKey
{
/** The full move object */
public final Move move;
/** Depth at which move was played (only taken into account for passes and swaps) */
public final int moveDepth;
/** Cached hashCode */
private final int cachedHashCode;
/**
* Constructor
* @param move
* @param depth Depth at which the move was played. Can be 0 if not known.
* Only used to distinguish pass/swap moves at different levels of search tree.
*/
public MoveKey(final Move move, final int depth)
{
this.move = move;
this.moveDepth = depth;
final int prime = 31;
int result = 1;
if (move.isPass())
{
result = prime * result + depth + 1297;
}
else if (move.isSwap())
{
result = prime * result + depth + 587;
}
else
{
if (!move.isOrientedMove())
{
result = prime * result + (move.toNonDecision() + move.fromNonDecision());
}
else
{
result = prime * result + move.toNonDecision();
result = prime * result + move.fromNonDecision();
}
result = prime * result + move.stateNonDecision();
}
result = prime * result + move.mover();
cachedHashCode = result;
}
@Override
public int hashCode()
{
return cachedHashCode;
}
@Override
public boolean equals(final Object obj)
{
if (this == obj)
return true;
if (!(obj instanceof MoveKey))
return false;
final MoveKey other = (MoveKey) obj;
if (move == null)
return (other.move == null);
if (move.mover() != other.move.mover())
return false;
final boolean movePass = move.isPass();
final boolean otherMovePass = other.move.isPass();
final boolean moveSwap = move.isSwap();
final boolean otherMoveSwap = other.move.isSwap();
if (movePass)
{
return (otherMovePass && moveDepth == other.moveDepth);
}
else if (moveSwap)
{
return (otherMoveSwap && moveDepth == other.moveDepth);
}
else
{
if (otherMovePass || otherMoveSwap)
return false;
if (move.isOrientedMove() != other.move.isOrientedMove())
return false;
if (move.isOrientedMove())
{
if (move.toNonDecision() != other.move.toNonDecision() || move.fromNonDecision() != other.move.fromNonDecision())
return false;
}
else
{
boolean fine = false;
if
(
(move.toNonDecision() == other.move.toNonDecision() && move.fromNonDecision() == other.move.fromNonDecision())
||
(move.toNonDecision() == other.move.fromNonDecision() && move.fromNonDecision() == other.move.toNonDecision())
)
{
fine = true;
}
if (!fine)
return false;
}
return (move.stateNonDecision() == other.move.stateNonDecision());
}
}
@Override
public String toString()
{
return "[Move = " + move + ", Hash = " + cachedHashCode + "]";
}
}
/**
* Object to be used as key for an N-gram of moves in hash tables.
*
* @author Dennis Soemers
*/
public static class NGramMoveKey
{
/** The array of full move object */
public final Move[] moves;
/** Depth at which move was played (only taken into account for passes and swaps) */
private final int moveDepth;
/** Cached hashCode */
private final int cachedHashCode;
/**
* Constructor
* @param moves
* @param depth Depth at which the first move of N-gram was played. Can be 0 if not known.
* Only used to distinguish pass/swap moves at different levels of search tree.
*/
public NGramMoveKey(final Move[] moves, final int depth)
{
this.moves = moves;
this.moveDepth = depth;
final int prime = 31;
int result = 1;
for (int i = 0; i < moves.length; ++i)
{
final Move move = moves[i];
if (move.isPass())
{
result = prime * result + depth + i + 1297;
}
else if (move.isSwap())
{
result = prime * result + depth + i + 587;
}
else
{
if (!move.isOrientedMove())
{
result = prime * result + (move.toNonDecision() + move.fromNonDecision());
}
else
{
result = prime * result + move.toNonDecision();
result = prime * result + move.fromNonDecision();
}
result = prime * result + move.stateNonDecision();
}
result = prime * result + move.mover();
}
cachedHashCode = result;
}
@Override
public int hashCode()
{
return cachedHashCode;
}
@Override
public boolean equals(final Object obj)
{
if (this == obj)
return true;
if (!(obj instanceof NGramMoveKey))
return false;
final NGramMoveKey other = (NGramMoveKey) obj;
if (moves.length != other.moves.length)
return false;
for (int i = 0; i < moves.length; ++i)
{
final Move move = moves[i];
final Move otherMove = other.moves[i];
if (move.mover() != otherMove.mover())
return false;
final boolean movePass = move.isPass();
final boolean otherMovePass = otherMove.isPass();
final boolean moveSwap = move.isSwap();
final boolean otherMoveSwap = otherMove.isSwap();
if (movePass)
{
return (otherMovePass && moveDepth == other.moveDepth);
}
else if (moveSwap)
{
return (otherMoveSwap && moveDepth == other.moveDepth);
}
else
{
if (otherMovePass || otherMoveSwap)
return false;
if (move.isOrientedMove() != otherMove.isOrientedMove())
return false;
if (move.isOrientedMove())
{
if (move.toNonDecision() != otherMove.toNonDecision() || move.fromNonDecision() != otherMove.fromNonDecision())
return false;
}
else
{
boolean fine = false;
if
(
(move.toNonDecision() == otherMove.toNonDecision() && move.fromNonDecision() == otherMove.fromNonDecision())
||
(move.toNonDecision() == otherMove.fromNonDecision() && move.fromNonDecision() == otherMove.toNonDecision())
)
{
fine = true;
}
if (!fine)
return false;
if (!(move.stateNonDecision() == otherMove.stateNonDecision()))
return false;
}
}
}
return true;
}
@Override
public String toString()
{
return "[Moves = " + Arrays.toString(moves) + ", Hash = " + cachedHashCode + "]";
}
}
}
| 52,264 | 26.306688 | 118 | java |
Ludii | Ludii-master/AI/src/search/mcts/backpropagation/AlphaGoBackprop.java | package search.mcts.backpropagation;
import other.context.Context;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
import utils.AIUtils;
/**
* An AlphaGo-style backpropagation, that returns a convex combination
* of a heuristic value function evaluated at the expanded node and
* a heuristic value function evaluated at the end of a playout.
*
* Can also be used for Alpha(Go) Zero style backpropagations
* by simply using a weight of 0.0 for playout value, and 1.0
* for the expanded node's value (plus, for efficiency, using
* 0-length playouts).
*
* @author Dennis Soemers
*/
public class AlphaGoBackprop extends BackpropagationStrategy
{
@Override
public void computeUtilities
(
final MCTS mcts,
final BaseNode startNode,
final Context context,
final double[] utilities,
final int numPlayoutMoves
)
{
assert (mcts.heuristics() != null);
final double playoutValueWeight = mcts.playoutValueWeight();
final double[] nodeHeuristicValues;
if (playoutValueWeight < 1.0)
{
// Mix value function of expanded node with playout outcome (like AlphaGo)
nodeHeuristicValues = startNode.heuristicValueEstimates();
}
else
{
// This array is irrelevant
nodeHeuristicValues = new double[utilities.length];
}
if (context.active() && playoutValueWeight > 0.0)
{
// Playout did not terminate, so should also run heuristics at end of playout
final double[] playoutHeuristicValues = AIUtils.heuristicValueEstimates(context, mcts.heuristics());
for (int p = 1; p < utilities.length; ++p)
{
utilities[p] = playoutHeuristicValues[p];
}
}
for (int p = 1; p < utilities.length; ++p)
{
// Mix node and playout values
utilities[p] = playoutValueWeight * utilities[p] + (1.0 - playoutValueWeight) * nodeHeuristicValues[p];
}
}
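// Worked example (added; not from the original source): with playoutValueWeight = 0.5,
// a playout outcome of +1.0 for some player and a heuristic value estimate of +0.2
// stored at the expanded node, the backpropagated value for that player is
// 0.5 * 1.0 + 0.5 * 0.2 = 0.6. A weight of 1.0 uses the playout outcome unchanged
// (plain MCTS); 0.0 uses only the node's heuristic estimate (AlphaGo Zero style).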
@Override
public int backpropagationFlags()
{
return 0;
}
}
| 1,902 | 24.716216 | 106 | java |
Ludii | Ludii-master/AI/src/search/mcts/backpropagation/BackpropagationStrategy.java | package search.mcts.backpropagation;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.json.JSONObject;
import other.context.Context;
import other.move.Move;
import search.mcts.MCTS;
import search.mcts.MCTS.ActionStatistics;
import search.mcts.MCTS.MoveKey;
import search.mcts.MCTS.NGramMoveKey;
import search.mcts.nodes.BaseNode;
import search.mcts.nodes.BaseNode.NodeStatistics;
/**
* Abstract class for implementations of backpropagation in MCTS
*
* @author Dennis Soemers
*/
public abstract class BackpropagationStrategy
{
//-------------------------------------------------------------------------
/** Flags for things we have to backpropagate */
protected int backpropFlags = 0;
/** AMAF stats per node for use by GRAVE (may be slightly different than stats used by RAVE/AMAF) */
public final static int GRAVE_STATS = 0x1;
/** Global MCTS-wide action statistics (e.g., for Progressive History) */
public final static int GLOBAL_ACTION_STATS = (0x1 << 1);
/** Global MCTS-wide N-gram action statistics (e.g., for NST) */
public final static int GLOBAL_NGRAM_ACTION_STATS = (0x1 << 2);
/** For every player, track global MCTS-wide stats on heuristic evaluations */
public final static int GLOBAL_HEURISTIC_STATS = (0x1 << 3);
//-------------------------------------------------------------------------
/**
* Set backprop flags for this backpropagation implementation
* @param backpropFlags
*/
public void setBackpropFlags(final int backpropFlags)
{
this.backpropFlags = backpropFlags;
}
//-------------------------------------------------------------------------
/**
* Computes the array of utilities that we want to backpropagate.
* This method is expected to modify the given utilities array in-place
*
* @param mcts
* @param startNode
* @param context
* @param utilities
* @param numPlayoutMoves
*/
public abstract void computeUtilities
(
final MCTS mcts,
final BaseNode startNode,
final Context context,
final double[] utilities,
final int numPlayoutMoves
);
/**
* @return Additional flags for data this Backpropagation wants to track.
*/
public abstract int backpropagationFlags();
//-------------------------------------------------------------------------
/**
* Updates the given node and all of its ancestors with statistics based on the given playout result.
* @param mcts
* @param startNode
* @param context
* @param utilities
* @param numPlayoutMoves
*/
public final void update
(
final MCTS mcts,
final BaseNode startNode,
final Context context,
final double[] utilities,
final int numPlayoutMoves
)
{
BaseNode node = startNode;
computeUtilities(mcts, startNode, context, utilities, numPlayoutMoves);
//System.out.println("utilities = " + Arrays.toString(utilities));
final boolean updateGRAVE = ((backpropFlags & GRAVE_STATS) != 0);
final boolean updateGlobalActionStats = ((backpropFlags & GLOBAL_ACTION_STATS) != 0);
final boolean updateGlobalNGramActionStats = ((backpropFlags & GLOBAL_NGRAM_ACTION_STATS) != 0);
final List<MoveKey> moveKeysAMAF = new ArrayList<MoveKey>();
final Iterator<Move> reverseMovesIterator = context.trial().reverseMoveIterator();
final int numTrialMoves = context.trial().numMoves();
int movesIdxAMAF = numTrialMoves - 1;
if (updateGRAVE || updateGlobalActionStats || updateGlobalNGramActionStats)
{
// collect all move keys for playout moves
while (movesIdxAMAF >= (numTrialMoves - numPlayoutMoves))
{
moveKeysAMAF.add(new MoveKey(reverseMovesIterator.next(), movesIdxAMAF));
--movesIdxAMAF;
}
}
while (node != null)
{
synchronized(node)
{
node.update(utilities);
if (updateGRAVE)
{
for (final MoveKey moveKey : moveKeysAMAF)
{
final NodeStatistics graveStats = node.getOrCreateGraveStatsEntry(moveKey);
//System.out.println("updating GRAVE stats in " + node + " for move: " + moveKey);
graveStats.visitCount += 1;
graveStats.accumulatedScore += utilities[context.state().playerToAgent(moveKey.move.mover())];
// the below would be sufficient for RAVE, but for GRAVE we also need moves
// made by the "incorrect" colour in higher-up nodes
/*
final int mover = moveKey.move.mover();
if (nodeColour == 0 || nodeColour == mover)
{
final NodeStatistics graveStats = node.getOrCreateGraveStatsEntry(moveKey);
graveStats.visitCount += 1;
graveStats.accumulatedScore += utilities[mover];
}*/
}
}
}
if (updateGRAVE || updateGlobalActionStats)
{
// we're going up one level, so also one more move to count as AMAF-move
if (movesIdxAMAF >= 0)
{
moveKeysAMAF.add(new MoveKey(reverseMovesIterator.next(), movesIdxAMAF));
--movesIdxAMAF;
}
}
node = node.parent();
}
updateGlobalActionStats
(
mcts, updateGlobalActionStats, updateGlobalNGramActionStats, moveKeysAMAF, context, utilities
);
}
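// Illustrative sketch of how an MCTS iteration might drive update() after a playout; the
// variable names (leafNode, endContext, numPlayoutMoves) are hypothetical and not part of this class:
//
// final double[] utilities = RankUtils.agentUtilities(endContext); // terminal utility per player
// backprop.update(mcts, leafNode, endContext, utilities, numPlayoutMoves);
//
// computeUtilities() may first overwrite entries of the utilities array in-place (e.g. with
// heuristic estimates for players that are still active), after which the same values are
// backed up from leafNode through all of its ancestors.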
//-------------------------------------------------------------------------
/**
* Helper method to update global (MCTS-wide) action stats for
* techniques such as RAVE, GRAVE, MAST, NST, etc.
*
* Can be reused by various different backpropagation implementations.
*
* @param mcts
* @param updateGlobalActionStats
* @param updateGlobalNGramActionStats
* @param moveKeysAMAF
* @param context
* @param utilities
*/
public static void updateGlobalActionStats
(
final MCTS mcts,
final boolean updateGlobalActionStats,
final boolean updateGlobalNGramActionStats,
final List<MoveKey> moveKeysAMAF,
final Context context,
final double[] utilities
)
{
if (updateGlobalActionStats || updateGlobalNGramActionStats)
{
// Update global, MCTS-wide action statistics
for (final MoveKey moveKey : moveKeysAMAF)
{
final ActionStatistics actionStats = mcts.getOrCreateActionStatsEntry(moveKey);
//System.out.println("updating global action stats for move: " + moveKey);
actionStats.visitCount += 1.0;
actionStats.accumulatedScore += utilities[context.state().playerToAgent(moveKey.move.mover())];
}
if (updateGlobalNGramActionStats)
{
// Also do N-grams for N > 1
// note: list of move keys is stored in reverse order
for (int startMove = moveKeysAMAF.size() - 1; startMove >= 1; --startMove)
{
final int maxNGramLength = Math.min(mcts.maxNGramLength(), startMove + 1);
final int nGramsDepth = moveKeysAMAF.get(startMove).moveDepth;
final int nGramsMover = moveKeysAMAF.get(startMove).move.mover();
// Start at 2, since the 1-length "n-grams" are already handled in normal action stats table
for (int n = 2; n <= maxNGramLength; ++n)
{
final Move[] nGram = new Move[n];
for (int i = 0; i < n; ++i)
{
nGram[i] = moveKeysAMAF.get(startMove - i).move;
}
final ActionStatistics nGramStats = mcts.getOrCreateNGramActionStatsEntry(new NGramMoveKey(nGram, nGramsDepth));
nGramStats.visitCount += 1.0;
nGramStats.accumulatedScore += utilities[context.state().playerToAgent(nGramsMover)];
}
}
}
}
}
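// Worked example of the N-gram construction above (illustrative): suppose the move keys collected
// in moveKeysAMAF correspond, in forward order, to moves m1, m2, m3. The list stores them in
// reverse order [m3, m2, m1], so with mcts.maxNGramLength() >= 3 the loop produces:
// startMove = 2 (m1): 2-gram (m1, m2) and 3-gram (m1, m2, m3)
// startMove = 1 (m2): 2-gram (m2, m3)
// Each N-gram entry is credited with the utility of the player who made its first move.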
//-------------------------------------------------------------------------
/**
* @param json
* @return Backpropagation strategy constructed from given JSON object
*/
public static BackpropagationStrategy fromJson(final JSONObject json)
{
BackpropagationStrategy backprop = null;
final String strategy = json.getString("strategy");
if (strategy.equalsIgnoreCase("MonteCarlo"))
{
return new MonteCarloBackprop();
}
return backprop;
}
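// Hypothetical usage sketch (the JSON shape is inferred from the code above):
//
// final JSONObject json = new JSONObject("{\"strategy\": \"MonteCarlo\"}");
// final BackpropagationStrategy backprop = BackpropagationStrategy.fromJson(json); // MonteCarloBackprop
//
// Strategy names that are not recognised here currently result in null being returned.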
//-------------------------------------------------------------------------
}
| 7,744 | 29.734127 | 118 | java |
Ludii | Ludii-master/AI/src/search/mcts/backpropagation/HeuristicBackprop.java | package search.mcts.backpropagation;
import other.context.Context;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
import utils.AIUtils;
/**
* Implementation of backpropagation that uses heuristic value estimates
* for any player that is still active at the end of a playout, instead
* of defaulting to 0.0
*
* @author Dennis Soemers
*/
public class HeuristicBackprop extends BackpropagationStrategy
{
@Override
public void computeUtilities
(
final MCTS mcts,
final BaseNode startNode,
final Context context,
final double[] utilities,
final int numPlayoutMoves
)
{
assert (mcts.heuristics() != null);
if (context.active())
{
// Playout did not terminate, so should run heuristics at end of playout
final double[] playoutHeuristicValues = AIUtils.heuristicValueEstimates(context, mcts.heuristics());
for (int p = 1; p < utilities.length; ++p)
{
utilities[p] = playoutHeuristicValues[p];
}
}
}
@Override
public int backpropagationFlags()
{
return 0;
}
}
| 1,033 | 20.541667 | 103 | java |
Ludii | Ludii-master/AI/src/search/mcts/backpropagation/MonteCarloBackprop.java | package search.mcts.backpropagation;
import other.context.Context;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Standard backpropagation implementation for MCTS, performing Monte-Carlo backups
* of playout outcomes.
*
* @author Dennis Soemers
*/
public class MonteCarloBackprop extends BackpropagationStrategy
{
@Override
public void computeUtilities
(
final MCTS mcts,
final BaseNode startNode,
final Context context,
final double[] utilities,
final int numPlayoutMoves
)
{
// Do nothing
}
@Override
public int backpropagationFlags()
{
return 0;
}
}
| 612 | 16.027778 | 83 | java |
Ludii | Ludii-master/AI/src/search/mcts/backpropagation/QualitativeBonus.java | package search.mcts.backpropagation;
import main.math.statistics.IncrementalStats;
import other.context.Context;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
import utils.AIUtils;
/**
* Implements a Qualitative bonus (based on heuristic value function estimates),
* as described in "Quality-based Rewards for Monte-Carlo Tree Search Simulations"
*
* @author Dennis Soemers
*/
public class QualitativeBonus extends BackpropagationStrategy
{
//-------------------------------------------------------------------------
/** Constant used in sigmoid squashing of bonus */
private final double k = 1.4;
/** Weight assigned to bonuses */
private final double a = 0.25;
//-------------------------------------------------------------------------
@Override
public void computeUtilities
(
final MCTS mcts,
final BaseNode startNode,
final Context context,
final double[] utilities,
final int numPlayoutMoves
)
{
assert (mcts.heuristics() != null);
final double[] heuristicValues = AIUtils.heuristicValueBonusEstimates(context, mcts.heuristics());
final IncrementalStats[] heuristicStats = mcts.heuristicStats();
for (int p = 1; p < heuristicValues.length; ++p)
{
final IncrementalStats stats = heuristicStats[p];
final double q = heuristicValues[p];
final double std = stats.getStd();
if (std > 0.0)
{
// Apply bonus
final double lambda = (q - stats.getMean()) / std;
final double bonus = -1.0 + (2.0 / (1.0 + Math.exp(-k * lambda)));
utilities[p] += a * bonus; // Not including sign(r) since our bonuses are from p perspective, not from winner's perspective
}
// Update incremental stats tracker
stats.observe(q);
}
}
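// Worked example for the bonus computed above (illustrative numbers only): with k = 1.4, if a
// player's heuristic estimate is q = 0.8 while the running stats so far have mean 0.2 and
// standard deviation 0.3, then lambda = (0.8 - 0.2) / 0.3 = 2.0 and
// bonus = -1 + 2 / (1 + exp(-1.4 * 2.0)) ~= 0.89, so roughly a * 0.89 ~= 0.22 is added to that
// player's utility before backpropagation.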
@Override
public int backpropagationFlags()
{
return BackpropagationStrategy.GLOBAL_HEURISTIC_STATS;
}
//-------------------------------------------------------------------------
}
| 1,925 | 26.126761 | 127 | java |
Ludii | Ludii-master/AI/src/search/mcts/finalmoveselection/FinalMoveSelectionStrategy.java | package search.mcts.finalmoveselection;
import org.json.JSONObject;
import other.move.Move;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Interface for different strategies of finally selecting the move to play in the real game
* (after the search has finished)
*
* @author Dennis Soemers
*
*/
public interface FinalMoveSelectionStrategy
{
//-------------------------------------------------------------------------
/**
* Should be implemented to select the move to play in the real game
*
* @param mcts
* @param rootNode
* @return The move.
*/
public Move selectMove(final MCTS mcts, final BaseNode rootNode);
//-------------------------------------------------------------------------
/**
* Customise the final move selection strategy based on a list of given string inputs
*
* @param inputs
*/
public void customise(final String[] inputs);
//-------------------------------------------------------------------------
/**
* @param json
* @return Final Move Selection strategy constructed from given JSON object
*/
public static FinalMoveSelectionStrategy fromJson(final JSONObject json)
{
FinalMoveSelectionStrategy selection = null;
final String strategy = json.getString("strategy");
if (strategy.equalsIgnoreCase("RobustChild"))
{
return new RobustChild();
}
return selection;
}
//-------------------------------------------------------------------------
}
| 1,461 | 22.967213 | 92 | java |
Ludii | Ludii-master/AI/src/search/mcts/finalmoveselection/MaxAvgScore.java | package search.mcts.finalmoveselection;
import java.util.concurrent.ThreadLocalRandom;
import other.move.Move;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Selects move corresponding to the child with the highest average score
*
* @author Dennis Soemers
*/
public final class MaxAvgScore implements FinalMoveSelectionStrategy
{
//-------------------------------------------------------------------------
@Override
public Move selectMove(final MCTS mcts, final BaseNode rootNode)
{
int bestIdx = -1;
double maxAvgScore = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final State state = rootNode.contextRef().state();
final int numChildren = rootNode.numLegalMoves();
final int moverAgent = state.playerToAgent(state.mover());
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = rootNode.childForNthLegalMove(i);
final double avgScore;
if (child == null)
avgScore = rootNode.valueEstimateUnvisitedChildren(moverAgent);
else
avgScore = child.expectedScore(moverAgent);
if (avgScore > maxAvgScore)
{
maxAvgScore = avgScore;
bestIdx = i;
numBestFound = 1;
}
else if (avgScore == maxAvgScore &&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0)
{
bestIdx = i;
}
}
return rootNode.nthLegalMove(bestIdx);
}
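// Note on the tie-break above: when several children share the maximum average score, the check
// ThreadLocalRandom.current().nextInt() % ++numBestFound == 0 keeps each newly found tied child
// with probability (roughly) 1/numBestFound, i.e. a reservoir-sampling style, approximately
// uniform choice among all tied children without storing them in a list.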
//-------------------------------------------------------------------------
@Override
public void customise(final String[] inputs)
{
// do nothing
}
//-------------------------------------------------------------------------
}
| 1,852 | 26.25 | 76 | java |
Ludii | Ludii-master/AI/src/search/mcts/finalmoveselection/ProportionalExpVisitCount.java | package search.mcts.finalmoveselection;
import main.collections.FVector;
import other.move.Move;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Selects moves proportionally to exponentiated visit counts.
*
* This strategy should never be used for "competitive" play, but can be useful
* to generate more variety in experience in self-play.
*
* @author Dennis Soemers
*/
public final class ProportionalExpVisitCount implements FinalMoveSelectionStrategy
{
//-------------------------------------------------------------------------
/** Temperature parameter tau (visit counts are raised to the power 1/tau to generate the distribution) */
protected double tau;
//-------------------------------------------------------------------------
/**
* Constructor with temperature parameter tau
* (1.0 = proportional to visit counts, 0.0 = greedy)
* @param tau
*/
public ProportionalExpVisitCount(final double tau)
{
this.tau = tau;
}
//-------------------------------------------------------------------------
@Override
public Move selectMove(final MCTS mcts, final BaseNode rootNode)
{
final FVector distribution = rootNode.computeVisitCountPolicy(tau);
final int actionIndex = distribution.sampleProportionally();
return rootNode.nthLegalMove(actionIndex);
}
//-------------------------------------------------------------------------
@Override
public void customise(final String[] inputs)
{
for (final String input : inputs)
{
if (input.startsWith("tau="))
{
tau = Double.parseDouble(input.substring("tau=".length()));
}
}
}
//-------------------------------------------------------------------------
}
| 1,701 | 26.015873 | 106 | java |
Ludii | Ludii-master/AI/src/search/mcts/finalmoveselection/RobustChild.java | package search.mcts.finalmoveselection;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import main.collections.FVector;
import other.move.Move;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Selects move corresponding to the most robust child (highest visit count),
* with an additional tie-breaker based on value estimates. If the MCTS
* has a learned selection policy, that can be used as a second tie-breaker.
*
* @author Dennis Soemers
*/
public final class RobustChild implements FinalMoveSelectionStrategy
{
//-------------------------------------------------------------------------
@Override
public Move selectMove(final MCTS mcts, final BaseNode rootNode)
{
final List<Move> bestActions = new ArrayList<Move>();
double bestActionValueEstimate = Double.NEGATIVE_INFINITY;
float bestActionPolicyPrior = Float.NEGATIVE_INFINITY;
final State rootState = rootNode.contextRef().state();
final int moverAgent = rootState.playerToAgent(rootState.mover());
int maxNumVisits = -1;
final FVector priorPolicy;
if (mcts.learnedSelectionPolicy() == null)
priorPolicy = null;
else
priorPolicy = rootNode.learnedSelectionPolicy();
final int numChildren = rootNode.numLegalMoves();
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = rootNode.childForNthLegalMove(i);
final int numVisits = child == null ? 0 : child.numVisits();
final double childValueEstimate = child == null ? 0.0 : child.expectedScore(moverAgent);
final float childPriorPolicy = priorPolicy == null ? -1.f : priorPolicy.get(i);
if (numVisits > maxNumVisits)
{
maxNumVisits = numVisits;
bestActions.clear();
bestActionValueEstimate = childValueEstimate;
bestActionPolicyPrior = childPriorPolicy;
bestActions.add(rootNode.nthLegalMove(i));
}
else if (numVisits == maxNumVisits)
{
if (childValueEstimate > bestActionValueEstimate)
{
// Tie-breaker; prefer higher value estimates
bestActions.clear();
bestActionValueEstimate = childValueEstimate;
bestActionPolicyPrior = childPriorPolicy;
bestActions.add(rootNode.nthLegalMove(i));
}
else if (childValueEstimate == bestActionValueEstimate)
{
// Tie for both num visits and also for estimated value; prefer higher prior policy
if (childPriorPolicy > bestActionPolicyPrior)
{
bestActions.clear();
bestActionValueEstimate = childValueEstimate;
bestActionPolicyPrior = childPriorPolicy;
bestActions.add(rootNode.nthLegalMove(i));
}
else if (childPriorPolicy == bestActionPolicyPrior)
{
// Tie for everything
bestActions.add(rootNode.nthLegalMove(i));
}
}
}
}
return bestActions.get(ThreadLocalRandom.current().nextInt(bestActions.size()));
}
//-------------------------------------------------------------------------
@Override
public void customise(final String[] inputs)
{
// Do nothing
}
//-------------------------------------------------------------------------
}
| 3,568 | 34.69 | 100 | java |
Ludii | Ludii-master/AI/src/search/mcts/nodes/BaseNode.java | package search.mcts.nodes;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import game.Game;
import gnu.trove.list.array.TIntArrayList;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.state.State;
import policies.softmax.SoftmaxPolicyLinear;
import search.mcts.MCTS;
import search.mcts.MCTS.MoveKey;
import search.mcts.backpropagation.BackpropagationStrategy;
import training.expert_iteration.ExItExperience;
import training.expert_iteration.ExItExperience.ExItExperienceState;
/**
* Abstract base class for nodes in MCTS search trees.
*
* @author Dennis Soemers
*/
public abstract class BaseNode
{
//-------------------------------------------------------------------------
/** Parent node */
protected BaseNode parent;
/** Move leading from parent to this node */
protected final Move parentMove;
/** Move leading from parent to this node, without consequents evaluated */
protected final Move parentMoveWithoutConseq;
/** Reference back to our MCTS algorithm */
protected final MCTS mcts;
/** Total number of times this node was visited. */
protected int numVisits = 0;
/** Number of virtual visits to this node (for Tree Parallelisation) */
protected AtomicInteger numVirtualVisits = new AtomicInteger();
/** Total scores backpropagated into this node (one per player, 0 index unused). */
protected final double[] totalScores;
/** Sums of squares of scores backpropagated into this node (one per player, 0 index unused). */
protected final double[] sumSquaredScores;
/** Value estimates based on heuristic score function, normalised to appropriate range in [-1, 1]. Can be null. */
protected double[] heuristicValueEstimates;
/** Table of AMAF stats for GRAVE */
protected final Map<MoveKey, NodeStatistics> graveStats;
/** Lock for MCTS code that modifies/reads node data in ways that should be synchronised */
protected transient ReentrantLock nodeLock = new ReentrantLock();
//-------------------------------------------------------------------------
/**
* Constructor
* @param mcts
* @param parent
* @param parentMove
* @param parentMoveWithoutConseq
* @param game
*/
public BaseNode
(
final MCTS mcts,
final BaseNode parent,
final Move parentMove,
final Move parentMoveWithoutConseq,
final Game game
)
{
this.mcts = mcts;
this.parent = parent;
this.parentMove = parentMove;
this.parentMoveWithoutConseq = parentMoveWithoutConseq;
totalScores = new double[game.players().count() + 1];
sumSquaredScores = new double[game.players().count() + 1];
heuristicValueEstimates = null;
final int backpropFlags = mcts.backpropFlags();
if ((backpropFlags & BackpropagationStrategy.GRAVE_STATS) != 0)
graveStats = new ConcurrentHashMap<MoveKey, NodeStatistics>();
else
graveStats = null;
}
//-------------------------------------------------------------------------
/**
* Adds the given new child, resulting from the move at the given move index
* @param child
* @param moveIdx
*/
public abstract void addChild(final BaseNode child, final int moveIdx);
/**
* @param n
* @return Child node for the nth legal move in this node (in current iteration)
*/
public abstract BaseNode childForNthLegalMove(final int n);
/**
* @return Reference to Context object for this node. Callers are
* expected NOT to modify this object (i.e. not directly apply moves on it)
*/
public abstract Context contextRef();
/**
* @return Deterministic reference to Context object. This will be
* null for non-root nodes in open-loop trees
*/
public abstract Context deterministicContextRef();
/**
* @param move
* @return Child node of this node corresponding to given move.
* Null if there is no child matching the given move.
*/
public abstract BaseNode findChildForMove(final Move move);
/**
* @return Distribution over legal moves in this node (in current iteration)
* computed by learned Selection policy
*/
public abstract FVector learnedSelectionPolicy();
/**
* @return List of all moves that MCTS believes may be valid from this node
*/
public abstract FastArrayList<Move> movesFromNode();
/**
* @return "colour" (= player ID) for this node. Returns 0 if it could be
* any player (i.e. in nondeterministic games).
*/
public abstract int nodeColour();
/**
* @param n
* @return nth legal move in this node (in current iteration)
*/
public abstract Move nthLegalMove(final int n);
/**
* @return Number of legal moves in this node (in current iteration)
*/
public abstract int numLegalMoves();
/**
* @return Context object that MCTS can use to run a full playout
*/
public abstract Context playoutContext();
/**
* Perform any required computations when a node becomes the root node.
* @param context
*/
public abstract void rootInit(final Context context);
/**
* Tells the node that we're starting a new iteration in it,
* with the current state encapsulated in the given context object.
* @param context
*/
public abstract void startNewIteration(final Context context);
/**
* @return Sum of visits among the currently-legal children
*/
public abstract int sumLegalChildVisits();
/**
* Traverse the tree by playing the move at the given index
* @param moveIdx
* @return Context object resulting from application of given move
*/
public abstract Context traverse(final int moveIdx);
/**
* Called when we reach this node in an MCTS iteration, indicating
* that it may have to update its Context reference
*/
public abstract void updateContextRef();
/**
* Recursively clean any thread-local variables we may have (it
* seems like GC struggles with them otherwise).
*/
public abstract void cleanThreadLocals();
//-------------------------------------------------------------------------
/**
* @param agent Agent index
*
* @return Expected score for given agent. Usually just the average backpropagated score
* (accounting for virtual losses). Subclasses may override to return better estimates
* (such as proven scores) if they have them.
*/
public double expectedScore(final int agent)
{
return (numVisits == 0) ? 0.0 : (totalScores[agent] - numVirtualVisits.get()) / (numVisits + numVirtualVisits.get());
}
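// Illustrative note on the virtual-loss arithmetic above: each pending (not yet backpropagated)
// iteration counts as a provisional loss. E.g. with numVisits = 4, totalScores[agent] = 2.0 and
// one virtual visit, this returns (2.0 - 1) / (4 + 1) = 0.2 rather than 2.0 / 4 = 0.5, which
// discourages parallel threads from all descending into the same node at once.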
/**
* @param agent Agent index
*
* @return Exploitation score / term for given agent. Generally just expected score, but
* subclasses may return different values to account for pruning/solving/etc.
*/
public double exploitationScore(final int agent)
{
return expectedScore(agent);
}
/**
* @param agent
* @return Is the value for given agent fully proven in this node?
*/
@SuppressWarnings("static-method")
public boolean isValueProven(final int agent)
{
return false;
}
/**
* @return Array of heuristic value estimates: one per player. Array can be null if MCTS
* has no heuristics.
*/
public double[] heuristicValueEstimates()
{
return heuristicValueEstimates;
}
/**
* @return Num visits (i.e. MCTS iterations) for this node
*/
public int numVisits()
{
return numVisits;
}
/**
* @return Number of virtual visits
*/
public int numVirtualVisits()
{
return numVirtualVisits.get();
}
/**
* Adds one virtual visit to this node (will be subtracted again during backpropagation)
*/
public void addVirtualVisit()
{
numVirtualVisits.incrementAndGet();
}
/**
* @return Parent node, or null if this is the root
*/
public BaseNode parent()
{
return parent;
}
/**
* @return Move leading from parent node to this node
*/
public Move parentMove()
{
return parentMove;
}
/**
* Sets the number of visits of this node to the given number
* @param numVisits
*/
public void setNumVisits(final int numVisits)
{
this.numVisits = numVisits;
}
/**
* Set the parent node of this node
* @param newParent
*/
public void setParent(final BaseNode newParent)
{
this.parent = newParent;
}
/**
* Sets the array of heuristic value estimates for this node
* NOTE: (value estimates per player, not per child node).
* @param heuristicValueEstimates
*/
public void setHeuristicValueEstimates(final double[] heuristicValueEstimates)
{
this.heuristicValueEstimates = heuristicValueEstimates;
}
/**
* @param player Player index
* @return Total score (sum of scores) backpropagated into this node for player
*/
public double totalScore(final int player)
{
return totalScores[player];
}
/**
* @param player Player index
* @return Sum of squared scores backpropagated into this node for player. NOTE: also adds virtual losses.
*/
public double sumSquaredScores(final int player)
{
return sumSquaredScores[player] + numVirtualVisits.get();
}
/**
* Backpropagates result with vector of utilities
* @param utilities The utilities.
*/
public void update(final double[] utilities)
{
++numVisits;
for (int p = 1; p < totalScores.length; ++p)
{
totalScores[p] += utilities[p];
sumSquaredScores[p] += utilities[p] * utilities[p];
}
numVirtualVisits.decrementAndGet();
}
/**
* @param agent Agent index
*
* @return Value estimate for unvisited children of this node
*/
public double valueEstimateUnvisitedChildren(final int agent)
{
switch (mcts.qInit())
{
case DRAW:
return 0.0;
case INF:
return 10000.0;
case LOSS:
return -1.0;
case PARENT:
if (numVisits == 0)
{
return 10000.0;
}
else
{
return expectedScore(agent);
}
case WIN:
return 1.0;
default:
return 0.0;
}
}
//-------------------------------------------------------------------------
/**
* @param moveKey
* @return GRAVE's AMAF stats for given move key. Creates new entry if it
* does not yet exist in this node's table.
*/
public NodeStatistics getOrCreateGraveStatsEntry(final MoveKey moveKey)
{
NodeStatistics stats = graveStats.get(moveKey);
if (stats == null)
{
stats = new NodeStatistics();
graveStats.put(moveKey, stats);
//System.out.println("creating entry for " + moveKey + " in " + this);
}
return stats;
}
/**
* @param moveKey
* @return GRAVE's AMAF stats for given move key.
*/
public NodeStatistics graveStats(final MoveKey moveKey)
{
// if (!graveStats.containsKey(moveKey))
// {
// System.out.println("will be returning null! Total num keys in this node = " + graveStats.keySet().size());
// for (final MoveKey key : graveStats.keySet())
// {
// System.out.println("key = " + key + ", stats = " + graveStats.get(key));
// }
// }
return graveStats.get(moveKey);
}
//-------------------------------------------------------------------------
/**
* Computes a policy over the list of children based on the visit counts
* collected through an MCTS search process. The policy assigns probability
* to actions proportional to the exponentiated visit counts
* N(s, a)^{1 / tau}, where tau is a temperature parameter, as described
* in the AlphaGo Zero paper.
*
* Special cases:
* - tau = 1.f results in a policy proportional to the raw visit counts
* - tau = 0.f (or, mathematically, tau --> 0.f) results in a greedy
* policy w.r.t. visit counts
*
* AlphaGo Zero used tau = 1.f for the first 30 moves of every game,
* and tau = 0.f afterwards.
* Anthony, Tian, and Barber (2017 NIPS paper) used tau = 1.f everywhere.
*
* Note that sampling from the distribution that would result from the
* tau = 0.f case (greedy) could technically be implemented slightly more
* efficiently by sampling directly, using an implementation as used by
* RobustChild, rather than first computing the distribution and then
* sampling from it.
*
* @param tau
* @return Vector.
*/
public FVector computeVisitCountPolicy(final double tau)
{
final FVector policy = new FVector(numLegalMoves());
if (tau == 0.0)
{
// special case: simply want to select greedily with respect to
// visit count
// first find what the maximum visit count is,
// and which children have that visit count
int maxVisitCount = -1;
final TIntArrayList maxVisitCountChildren = new TIntArrayList();
for (int i = 0; i < numLegalMoves(); ++i)
{
final BaseNode child = childForNthLegalMove(i);
final int visitCount;
if (child == null)
{
visitCount = 0;
}
else
{
visitCount = child.numVisits;
}
if (visitCount > maxVisitCount)
{
maxVisitCount = visitCount;
maxVisitCountChildren.reset();
maxVisitCountChildren.add(i);
}
else if (visitCount == maxVisitCount)
{
maxVisitCountChildren.add(i);
}
}
// this is the probability we assign to all max children
final float maxProb = 1.f / maxVisitCountChildren.size();
// now assign the probabilities to indices
for (int i = 0; i < maxVisitCountChildren.size(); ++i)
{
policy.set(maxVisitCountChildren.getQuick(i), maxProb);
}
}
else
{
// first collect visit counts in vector
for (int i = 0; i < numLegalMoves(); ++i)
{
final BaseNode child = childForNthLegalMove(i);
final int visitCount;
if (child == null)
{
visitCount = 0;
}
else
{
visitCount = child.numVisits;
}
policy.set(i, visitCount);
}
if (tau != 1.0) // need to exponentiate visit counts
{
policy.raiseToPower(1.0 / tau);
}
final float sumVisits = policy.sum();
if (sumVisits > 0.f)
policy.mult(1.f / sumVisits);
}
return policy;
}
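// Worked example for the temperature above (illustrative counts): children visited [10, 30, 60] times.
// tau = 1.0 -> proportional to raw counts: [0.10, 0.30, 0.60]
// tau = 0.5 -> counts first raised to power 1/tau = 2: [100, 900, 3600] -> [~0.02, ~0.20, ~0.78]
// tau = 0.0 -> greedy: all probability mass on the most-visited child: [0.0, 0.0, 1.0]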
//-------------------------------------------------------------------------
/**
* @return The normalised entropy of the discrete distribution implied by
* the MCTS visit counts among this node's children.
*/
public double normalisedEntropy()
{
// Compute distribution implied by visit counts
final FVector distribution = computeVisitCountPolicy(1.f);
final int dim = distribution.dim();
if (dim <= 1)
{
return 0.0;
}
// Compute unnormalised entropy
// (in nats, unit won't matter after normalisation)
double entropy = 0.0;
for (int i = 0; i < dim; ++i)
{
final float prob = distribution.get(i);
if (prob > 0.f)
{
entropy -= prob * Math.log(prob);
}
}
// Normalise and return
return (entropy / Math.log(dim));
}
/**
* @return The normalised entropy of the distribution computed by the
* learned Selection policy for this node.
*/
public double learnedSelectionPolicyNormalisedEntropy()
{
// compute distribution using learned Selection policy
final FVector distribution = learnedSelectionPolicy();
final int dim = distribution.dim();
if (dim <= 1)
{
return 0.0;
}
// Compute unnormalised entropy
// (in nats, unit won't matter after normalisation)
double entropy = 0.0;
for (int i = 0; i < dim; ++i)
{
final float prob = distribution.get(i);
if (prob > 0.f)
{
entropy -= prob * Math.log(prob);
}
}
// Normalise and return
return (entropy / Math.log(dim));
}
/**
* @return The normalised entropy of the distribution computed by the
* learned Play-out policy for this node.
*/
public double learnedPlayoutPolicyNormalisedEntropy()
{
// compute distribution using learned Play-out policy
final FVector distribution =
((SoftmaxPolicyLinear) mcts.playoutStrategy()).computeDistribution(
contextRef(), contextRef().game().moves(contextRef()).moves(), true);
final int dim = distribution.dim();
if (dim <= 1)
{
return 0.0;
}
// Compute unnormalised entropy
// (in nats, unit won't matter after normalisation)
double entropy = 0.0;
for (int i = 0; i < dim; ++i)
{
final float prob = distribution.get(i);
if (prob > 0.f)
{
entropy -= prob * Math.log(prob);
}
}
// Normalise and return
return (entropy / Math.log(dim));
}
//-------------------------------------------------------------------------
/**
* @param weightVisitCount
* @return A sample of experience for learning with Expert Iteration
*/
public ExItExperience generateExItExperience(final float weightVisitCount)
{
final FastArrayList<Move> actions = new FastArrayList<Move>(numLegalMoves());
final float[] valueEstimates = new float[numLegalMoves()];
final State state = deterministicContextRef().state();
for (int i = 0; i < numLegalMoves(); ++i)
{
final BaseNode child = childForNthLegalMove(i);
final Move m = new Move(nthLegalMove(i));
m.setMover(nthLegalMove(i).mover());
m.then().clear(); // Can't serialise these, and won't need them
actions.add(m);
if (child == null)
valueEstimates[i] = -1.f;
else
valueEstimates[i] = (float) child.expectedScore(state.playerToAgent(state.mover()));
}
FVector visitCountPolicy = computeVisitCountPolicy(1.0);
final float min = visitCountPolicy.min();
boolean allPruned = true;
for (int i = 0; i < numLegalMoves(); ++i)
{
final BaseNode child = childForNthLegalMove(i);
if (child != null && child instanceof ScoreBoundsNode)
{
if (((ScoreBoundsNode) child).isPruned())
visitCountPolicy.set(i, min);
else
allPruned = false;
}
else
{
allPruned = false;
}
}
if (allPruned) // Special case; if EVERYTHING gets pruned, we prefer to stick to existing biases
visitCountPolicy = computeVisitCountPolicy(1.0);
else
visitCountPolicy.normalise();
return new ExItExperience
(
new Context(deterministicContextRef()),
new ExItExperienceState(deterministicContextRef()),
actions,
visitCountPolicy,
FVector.wrap(valueEstimates),
weightVisitCount
);
}
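// Illustrative note: a single sample produced by generateExItExperience() above pairs the
// (deterministic) state in this node with its legal moves, the visit-count distribution
// (tau = 1.0, with soft-pruned children pushed down to the minimum before re-normalising),
// per-move value estimates taken from the tree, and the weightVisitCount given by the caller.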
/**
* @return List of samples of experience for learning with Expert Iteration
*/
public List<ExItExperience> generateExItExperiences()
{
final List<ExItExperience> experiences = new ArrayList<ExItExperience>();
experiences.add(generateExItExperience(1.f));
// final State myState = this.contextRef().state();
//
// for (int i = 0; i < numLegalMoves(); ++i)
// {
// final BaseNode child = childForNthLegalMove(i);
// if (child != null && child.numVisits() > 0 && child.isValueProven(myState.playerToAgent(myState.mover())) && child.numLegalMoves() > 0)
// experiences.add(child.generateExItExperience(((float) child.numVisits() / numVisits())));
// }
return experiences;
}
//-------------------------------------------------------------------------
/**
* @return Lock for this node
*/
public ReentrantLock getLock()
{
return nodeLock;
}
//-------------------------------------------------------------------------
/**
* Wrapper class for statistics we may want to store inside nodes
* (accumulated scores + visit count)
*
* @author Dennis Soemers
*/
public static class NodeStatistics
{
/** Visit count */
public int visitCount = 0;
/** Accumulated score */
public double accumulatedScore = 0.0;
@Override
public String toString()
{
return "[visits = " + visitCount + ", accum. score = " + accumulatedScore + "]";
}
}
//-------------------------------------------------------------------------
}
| 21,079 | 26.95756 | 143 | java |
Ludii | Ludii-master/AI/src/search/mcts/nodes/DeterministicNode.java | package search.mcts.nodes;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import search.mcts.MCTS;
/**
* Abstract class for nodes for any deterministic game.
*
* @author Dennis Soemers
*/
public abstract class DeterministicNode extends BaseNode
{
//-------------------------------------------------------------------------
/** Context for this node (contains game state) */
protected final Context context;
/** Array of child nodes. */
protected final DeterministicNode[] children;
/** Array of legal moves in this node's state */
protected final Move[] legalMoves;
/** Cached policy over the list of children */
protected FVector cachedPolicy = null;
/** Indices of relevant children (for deterministic game, every child is always relevant) */
protected final int[] childIndices;
/** Number of (potential) children that we've never visited */
protected int numUnvisitedChildren = -1;
//-------------------------------------------------------------------------
/**
* Constructor
*
* @param mcts
* @param parent
* @param parentMove
* @param parentMoveWithoutConseq
* @param context
*/
public DeterministicNode
(
final MCTS mcts,
final BaseNode parent,
final Move parentMove,
final Move parentMoveWithoutConseq,
final Context context
)
{
super(mcts, parent, parentMove, parentMoveWithoutConseq, context.game());
this.context = context;
if (context.trial().over())
{
// we just created a node for a terminal game state,
// so create empty list of actions
legalMoves = new Move[0];
}
else
{
// non-terminal game state, so figure out list of actions we can
// still take
final FastArrayList<Move> actions = context.game().moves(context).moves();
legalMoves = new Move[actions.size()];
actions.toArray(legalMoves);
}
children = new DeterministicNode[legalMoves.length];
childIndices = new int[children.length];
for (int i = 0; i < childIndices.length; ++i)
{
childIndices[i] = i;
}
numUnvisitedChildren = children.length;
}
//-------------------------------------------------------------------------
@Override
public void addChild(final BaseNode child, final int moveIdx)
{
children[moveIdx] = (DeterministicNode) child;
--numUnvisitedChildren;
if (numUnvisitedChildren == 0 && MCTS.NULL_UNDO_DATA)
context.trial().nullUndoData(); // Clear a bunch of memory we no longer need
}
@Override
public DeterministicNode childForNthLegalMove(final int n)
{
return children[n];
}
@Override
public Context contextRef()
{
return context;
}
@Override
public Context deterministicContextRef()
{
return context;
}
@Override
public DeterministicNode findChildForMove(final Move move)
{
DeterministicNode result = null;
for (final DeterministicNode child : children)
{
if (child != null && child.parentMove().equals(move))
{
//System.out.println("found equal move: " + child.parentMove() + " equals " + move);
result = child;
break;
}
// else if (child != null)
// {
// System.out.println(child.parentMove() + " no match for: " + move);
// }
}
return result;
}
@Override
public FastArrayList<Move> movesFromNode()
{
return new FastArrayList<Move>(legalMoves);
}
@Override
public int nodeColour()
{
return context.state().mover();
}
@Override
public Move nthLegalMove(final int n)
{
return legalMoves[n];
}
@Override
public int numLegalMoves()
{
return children.length;
}
@Override
public Context playoutContext()
{
// Need to copy context
return mcts.copyContext(context);
}
@Override
public void rootInit(final Context cont)
{
// Do nothing
}
@Override
public void startNewIteration(final Context cont)
{
// Do nothing
}
@Override
public int sumLegalChildVisits()
{
// Just the number of visits of this node
return numVisits;
}
@Override
public Context traverse(final int moveIdx)
{
final Context newContext;
if (children[moveIdx] == null)
{
// Need to copy context
newContext = mcts.copyContext(context);
newContext.game().apply(newContext, legalMoves[moveIdx]);
}
else
{
newContext = children[moveIdx].context;
}
return newContext;
}
@Override
public void updateContextRef()
{
// Do nothing
}
@Override
public void cleanThreadLocals()
{
// Do nothing
}
//-------------------------------------------------------------------------
/**
* @return Array of child nodes
*/
public DeterministicNode[] children()
{
return children;
}
/**
* @return List of legal actions for this node's state
*/
public Move[] legalActions()
{
return legalMoves;
}
//-------------------------------------------------------------------------
@Override
public FVector learnedSelectionPolicy()
{
// NOTE: by caching policy, we're assuming here that our learned policy
// will never change in the middle of a single game. Have to change
// this if we ever want to experiment with online learning in the
// middle of a game.
if (cachedPolicy == null)
{
// didn't compute policy yet, so need to do so
cachedPolicy =
mcts.learnedSelectionPolicy().computeDistribution(
context, new FastArrayList<Move>(legalMoves), true);
}
return cachedPolicy;
}
//-------------------------------------------------------------------------
}
| 6,164 | 22.711538 | 96 | java |
Ludii | Ludii-master/AI/src/search/mcts/nodes/OpenLoopNode.java | package search.mcts.nodes;
import java.util.ArrayList;
import java.util.List;
import game.Game;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import policies.softmax.SoftmaxPolicy;
import search.mcts.MCTS;
/**
* Node class for Open-Loop implementations of MCTS.
* This is primarily intended for nondeterministic games.
*
* @author Dennis Soemers
*/
public final class OpenLoopNode extends BaseNode
{
//-------------------------------------------------------------------------
/** List of child nodes */
protected final List<OpenLoopNode> children = new ArrayList<OpenLoopNode>(10);
/** Context object for current iteration being run through this node */
protected ThreadLocal<Context> currentItContext = ThreadLocal.withInitial(() -> {return null;});
/** Our root nodes will keep a deterministic context reference */
protected Context deterministicContext = null;
/** For the root, we no longer need thread-local current-legal move lists and can instead use a single fixed list */
protected FastArrayList<Move> rootLegalMovesList = null;
/** Current list of legal moves */
protected ThreadLocal<FastArrayList<Move>> currentLegalMoves = ThreadLocal.withInitial(() -> {return null;});
/**
* Distribution over legal moves in current iteration in this node,
* as computed by learned Selection policy
*/
protected ThreadLocal<FVector> learnedSelectionPolicy = ThreadLocal.withInitial(() -> {return null;});
/** Learned selection policy for root node, where we no longer need it to be thread-local */
protected FVector rootLearnedSelectionPolicy = null;
/**
* Array in which we store, for every potential index of a currently-legal move,
* the corresponding child node (or null if not yet expanded).
*/
protected ThreadLocal<OpenLoopNode[]> moveIdxToNode = ThreadLocal.withInitial(() -> {return null;});
/** A mapping from move indices to nodes for the root (no longer want this to be thread-local) */
protected OpenLoopNode[] rootMoveIdxToNode = null;
/** Cached logit computed according to learned selection policy */
protected ThreadLocal<Float> logit = ThreadLocal.withInitial(() -> {return Float.valueOf(Float.NaN);});
//-------------------------------------------------------------------------
/**
* Constructor
* @param mcts
* @param parent
* @param parentMove
* @param parentMoveWithoutConseq
* @param game
*/
public OpenLoopNode
(
final MCTS mcts,
final BaseNode parent,
final Move parentMove,
final Move parentMoveWithoutConseq,
final Game game
)
{
super(mcts, parent, parentMove, parentMoveWithoutConseq, game);
}
//-------------------------------------------------------------------------
@Override
public void addChild(final BaseNode child, final int moveIdx)
{
children.add((OpenLoopNode) child);
if (parent() == null && deterministicContext != null)
{
// in case of root node, we'll also want to make sure to call this
updateLegalMoveDependencies(true);
}
}
@Override
public OpenLoopNode childForNthLegalMove(final int n)
{
if (rootMoveIdxToNode != null)
return rootMoveIdxToNode[n];
return moveIdxToNode.get()[n];
}
@Override
public Context contextRef()
{
return currentItContext.get();
}
@Override
public Context deterministicContextRef()
{
return deterministicContext;
}
@Override
public OpenLoopNode findChildForMove(final Move move)
{
OpenLoopNode result = null;
for (final OpenLoopNode child : children)
{
if (child.parentMove().equals(move))
{
result = child;
break;
}
}
return result;
}
@Override
public FVector learnedSelectionPolicy()
{
if (rootLearnedSelectionPolicy != null)
return rootLearnedSelectionPolicy;
return learnedSelectionPolicy.get();
}
@Override
public FastArrayList<Move> movesFromNode()
{
if (rootLegalMovesList != null)
return rootLegalMovesList;
return currentLegalMoves.get();
}
@Override
public int nodeColour()
{
return 0; // could be anyone
}
@Override
public Move nthLegalMove(final int n)
{
return movesFromNode().get(n);
}
@Override
public int numLegalMoves()
{
return movesFromNode().size();
}
@Override
public Context playoutContext()
{
// Don't need to copy context
return currentItContext.get();
}
@Override
public void rootInit(final Context context)
{
deterministicContext = context;
currentItContext.set(mcts.copyContext(context));
updateLegalMoveDependencies(true);
}
@Override
public void startNewIteration(final Context context)
{
// make a copy of given context
currentItContext.set(mcts.copyContext(context));
}
@Override
public int sumLegalChildVisits()
{
// only collect visits of children that are currently legal, not
// just the visit count of this node
int sum = 0;
for (int i = 0; i < numLegalMoves(); ++i)
{
final OpenLoopNode child = childForNthLegalMove(i);
if (child != null)
{
sum += child.numVisits;
}
}
return sum;
}
@Override
public Context traverse(final int moveIdx)
{
// No need to copy current context, just modify it
final Context context = currentItContext.get();
context.game().apply(context, movesFromNode().get(moveIdx));
return context;
}
@Override
public void updateContextRef()
{
if (parent != null)
{
// We take the same reference as our parent node
currentItContext.set(parent.contextRef());
// and update some computations based on legal moves
updateLegalMoveDependencies(false);
}
}
@Override
public void cleanThreadLocals()
{
currentItContext.remove();
currentLegalMoves.remove();
learnedSelectionPolicy.remove();
moveIdxToNode.remove();
logit.remove();
getLock().lock();
try
{
for (final OpenLoopNode child : children)
{
child.cleanThreadLocals();
}
}
finally
{
getLock().unlock();
}
}
//-------------------------------------------------------------------------
/**
* Update any internal data that depends on the list of legal
* moves in the current Context reference.
* @param root Whether this node is (or just turned into) a root node
*/
private void updateLegalMoveDependencies(final boolean root)
{
getLock().lock();
try
{
final Context context = root ? deterministicContext : currentItContext.get();
final FastArrayList<Move> legalMoves;
if (root)
{
rootLegalMovesList = new FastArrayList<Move>(context.game().moves(context).moves());
currentLegalMoves.set(null);
legalMoves = rootLegalMovesList;
}
else
{
legalMoves = new FastArrayList<Move>(context.game().moves(context).moves());
currentLegalMoves.set(legalMoves);
}
if (root)
{
// Now that this is a root node, we may be able to remove some
// children with moves that are not legal
for (int i = children.size() - 1; i >= 0; --i)
{
if (!legalMoves.contains(children.get(i).parentMoveWithoutConseq))
children.remove(i).cleanThreadLocals();
}
}
// Update mapping from legal move index to child node
final OpenLoopNode[] mapping = new OpenLoopNode[legalMoves.size()];
if (root)
{
rootMoveIdxToNode = mapping;
moveIdxToNode.set(null);
}
else
{
moveIdxToNode.set(mapping);
}
for (int i = 0; i < mapping.length; ++i)
{
final Move move = legalMoves.get(i);
for (int j = 0; j < children.size(); ++j)
{
if (move.equals(children.get(j).parentMoveWithoutConseq))
{
mapping[i] = children.get(j);
break;
}
}
}
// Update learned policy distribution
if (mcts.learnedSelectionPolicy() != null)
{
final float[] logits = new float[mapping.length];
for (int i = 0; i < logits.length; ++i)
{
if (mapping[i] != null && !Float.isNaN(mapping[i].logit.get().floatValue()))
{
logits[i] = mapping[i].logit.get().floatValue();
}
else
{
logits[i] = mcts.learnedSelectionPolicy().computeLogit(context, legalMoves.get(i));
if (mapping[i] != null)
{
mapping[i].logit.set(Float.valueOf(logits[i]));
}
}
}
final FVector dist = FVector.wrap(logits);
if (mcts.learnedSelectionPolicy() instanceof SoftmaxPolicy)
dist.softmax();
else
dist.normalise();
if (root)
{
rootLearnedSelectionPolicy = dist;
learnedSelectionPolicy.set(null);
}
else
{
learnedSelectionPolicy.set(dist);
}
}
}
finally
{
getLock().unlock();
}
}
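// Illustrative note: in an open-loop tree the legal moves can differ between iterations that
// pass through this same node (e.g. because of stochastic outcomes earlier in the trial), so
// the mapping rebuilt above matches children by their parent move rather than by a fixed index;
// a child whose parent move is not legal in the current iteration is simply unreachable for
// that iteration (and, once this node becomes the root, such children are removed entirely).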
//-------------------------------------------------------------------------
}
| 8,942 | 23.105121 | 117 | java |
Ludii | Ludii-master/AI/src/search/mcts/nodes/ScoreBoundsNode.java | package search.mcts.nodes;
import other.RankUtils;
import other.context.Context;
import other.move.Move;
import search.mcts.MCTS;
/**
* Node for MCTS tree that tracks pessimistic and optimistic score bounds, for
* solving of nodes.
*
* @author Dennis Soemers
*/
public final class ScoreBoundsNode extends DeterministicNode
{
//-------------------------------------------------------------------------
/** For every agent, a pessimistic score bound */
private final double[] pessimisticScores;
/** For every agent, an optimistic score bound */
private final double[] optimisticScores;
/**
* We'll "soft" prune a node N (make it return very negative exploitation scores)
* whenever, for the agent to make a move in its parent node, the pessimistic
* score of the parent is greater than or equal to the optimistic score of N.
*/
private boolean pruned = false;
//-------------------------------------------------------------------------
/**
* Constructor
*
* @param mcts
* @param parent
* @param parentMove
* @param parentMoveWithoutConseq
* @param context
*/
public ScoreBoundsNode
(
final MCTS mcts,
final BaseNode parent,
final Move parentMove,
final Move parentMoveWithoutConseq,
final Context context
)
{
super(mcts, parent, parentMove, parentMoveWithoutConseq, context);
final int numPlayers = context.game().players().count();
pessimisticScores = new double[numPlayers + 1];
optimisticScores = new double[numPlayers + 1];
final double nextWorstScore = RankUtils.rankToUtil(context.computeNextLossRank(), numPlayers);
final double nextBestScore = RankUtils.rankToUtil(context.computeNextWinRank(), numPlayers);
final double[] currentUtils = RankUtils.agentUtilities(context);
for (int p = 1; p <= numPlayers; ++p)
{
if (!context.active(p)) // Have a proven outcome
{
pessimisticScores[p] = currentUtils[p];
optimisticScores[p] = currentUtils[p];
}
else
{
pessimisticScores[p] = nextWorstScore;
optimisticScores[p] = nextBestScore;
}
}
// Update bounds in parents (need to do this in a separate loop for correct pruning)
if (parent != null)
{
for (int p = 1; p <= numPlayers; ++p)
{
if (currentUtils[p] != 0.0)
{
((ScoreBoundsNode) parent).updatePessBounds(p, pessimisticScores[p], this);
((ScoreBoundsNode) parent).updateOptBounds(p, optimisticScores[p], this);
}
}
}
}
//-------------------------------------------------------------------------
@Override
public double expectedScore(final int agent)
{
if (pessimisticScores[agent] == optimisticScores[agent])
return pessimisticScores[agent]; // Solved this score
return super.expectedScore(agent);
}
@Override
public double exploitationScore(final int agent)
{
if (pruned)
{
final ScoreBoundsNode sbParent = (ScoreBoundsNode) parent;
if (sbParent.optBound(agent) > pessBound(agent))
return -10_000.0;
}
return super.exploitationScore(agent);
}
@Override
public boolean isValueProven(final int agent)
{
return (pessimisticScores[agent] == optimisticScores[agent]);
}
//-------------------------------------------------------------------------
/**
* One of our children has an updated pessimistic bound for the given agent;
* check if we should also update now
*
* @param agent
* @param pessBound
* @param fromChild Child from which we receive update
*/
public void updatePessBounds(final int agent, final double pessBound, final ScoreBoundsNode fromChild)
{
final double oldPess = pessimisticScores[agent];
if (pessBound > oldPess) // May be able to increase pessimistic bounds
{
final int moverAgent = contextRef().state().playerToAgent(contextRef().state().mover());
if (moverAgent == agent)
{
// The agent for which one of our children has a new pessimistic bound
// is the agent to move in this node. Hence, we can update directly
pessimisticScores[agent] = pessBound;
// Mark any children with an optimistic bound less than or equal to our
// new pessimistic bound as pruned
for (int i = 0; i < children.length; ++i)
{
final ScoreBoundsNode child = (ScoreBoundsNode) children[i];
if (child != null)
{
if (child.optBound(agent) <= pessBound)
child.markPruned();
}
}
if (parent != null)
((ScoreBoundsNode) parent).updatePessBounds(agent, pessBound, this);
}
else
{
// The agent for which one of our children has a new pessimistic bound
// is NOT the agent to move in this node. Hence, we only update to
// the minimum pessimistic bound over all children.
//
// Technically, if the real value (opt = pess) were proven for the
// agent to move, we could restrict the set of children over
// which we take the minimum to just those that have the optimal
// value for the agent to move.
//
// This is more expensive to implement though, and only relevant in
// games with more than 2 players, and there likely also only very
// rarely, so we don't bother doing this.
double minPess = pessBound;
for (int i = 0; i < children.length; ++i)
{
final ScoreBoundsNode child = (ScoreBoundsNode) children[i];
if (child == null)
{
return; // Can't update anything if we have an unvisited child left
}
else
{
final double pess = child.pessBound(agent);
if (pess < minPess)
{
if (pess == oldPess)
return; // Won't be able to update
minPess = pess;
}
}
}
if (minPess < oldPess)
{
System.err.println("ERROR in updatePessBounds()!");
System.err.println("oldPess = " + oldPess);
System.err.println("minPess = " + minPess);
System.err.println("pessBound = " + pessBound);
}
// We can update
pessimisticScores[agent] = minPess;
if (parent != null)
((ScoreBoundsNode) parent).updatePessBounds(agent, minPess, this);
}
}
}
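// Worked example for updatePessBounds() above (two-player, utilities in [-1, 1], illustrative):
// suppose agent 1 is the mover in this node and one child becomes proven to be at least a draw
// for agent 1 (its pessimistic bound rises to 0.0). Then pessimisticScores[1] here also rises to
// 0.0, and every sibling whose optimistic bound for agent 1 is <= 0.0 can never become preferable
// for agent 1, so it is soft-pruned via markPruned(); the new bound then propagates towards the root.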
/**
* One of our children has an updated optimistic bound for the given agent;
* check if we should also update now
*
* @param agent
* @param optBound
* @param fromChild Child from which we receive update
*/
public void updateOptBounds(final int agent, final double optBound, final ScoreBoundsNode fromChild)
{
final int moverAgent = contextRef().state().playerToAgent(contextRef().state().mover());
if (moverAgent == agent)
{
if (optBound <= pessimisticScores[agent])
{
// The optimistic bound propagated up from the child is at best as good
// as our pessimistic score for the agent to move in this node, so
// we can prune the child
fromChild.markPruned();
}
}
final double oldOpt = optimisticScores[agent];
if (optBound < oldOpt) // May be able to decrease optimistic bounds
{
// Regardless of who the mover in this node is, any given agent's optimistic
// bound should always just be the maximum over all their children
double maxOpt = optBound;
for (int i = 0; i < children.length; ++i)
{
final ScoreBoundsNode child = (ScoreBoundsNode) children[i];
if (child == null)
{
return; // Can't update anything if we have an unvisited child left
}
else
{
final double opt = child.optBound(agent);
if (opt > maxOpt)
{
if (opt == oldOpt)
return; // Won't be able to update
maxOpt = opt;
}
}
}
if (maxOpt > oldOpt)
System.err.println("ERROR in updateOptBounds()!");
// We can update
optimisticScores[agent] = maxOpt;
if (parent != null)
((ScoreBoundsNode) parent).updateOptBounds(agent, maxOpt, this);
}
}
//-------------------------------------------------------------------------
/**
* @param agent
* @return Current pessimistic bound for given agent
*/
public double pessBound(final int agent)
{
return pessimisticScores[agent];
}
/**
* @param agent
* @return Current optimistic bound for given agent
*/
public double optBound(final int agent)
{
return optimisticScores[agent];
}
/**
* Mark this node as being "pruned"
*/
public void markPruned()
{
pruned = true;
// final ScoreBoundsNode sbParent = (ScoreBoundsNode) parent;
// final int parentMover = sbParent.deterministicContextRef().state().playerToAgent(sbParent.deterministicContextRef().state().mover());
// System.out.println();
// System.out.println("Marked as pruned");
// System.out.println("Parent agent to move = " + parentMover);
// System.out.println("My pessimistic bound for agent " + parentMover + " = " + pessBound(parentMover));
// System.out.println("My optimistic bound for agent " + parentMover + " = " + optBound(parentMover));
// System.out.println("Parent pessimistic bound for agent " + parentMover + " = " + sbParent.pessBound(parentMover));
// System.out.println("Parent optimistic bound for agent " + parentMover + " = " + sbParent.optBound(parentMover));
// System.out.println("My status = " + deterministicContextRef().trial().status());
}
/**
* @return Did this node get marked as "pruned"?
*/
public boolean isPruned()
{
return pruned;
}
//-------------------------------------------------------------------------
}
| 10,119 | 30.331269 | 140 | java |
Ludii | Ludii-master/AI/src/search/mcts/nodes/StandardNode.java | package search.mcts.nodes;
import other.context.Context;
import other.move.Move;
import search.mcts.MCTS;
/**
* Nodes for "standard" MCTS search trees, for deterministic games.
* This node implementation stores a game state in every node, and
* assumes every node has a fixed list of legal actions.
*
* @author Dennis Soemers
*/
public final class StandardNode extends DeterministicNode
{
//-------------------------------------------------------------------------
/**
* Constructor
*
* @param mcts
* @param parent
* @param parentMove
* @param parentMoveWithoutConseq
* @param context
*/
public StandardNode
(
final MCTS mcts,
final BaseNode parent,
final Move parentMove,
final Move parentMoveWithoutConseq,
final Context context
)
{
super(mcts, parent, parentMove, parentMoveWithoutConseq, context);
}
//-------------------------------------------------------------------------
}
| 1,008 | 22.465116 | 79 | java |
Ludii | Ludii-master/AI/src/search/mcts/playout/HeuristicPlayout.java | package search.mcts.playout;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.FileHandling;
import main.grammar.Report;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilitySimple;
import other.AI;
import other.context.Context;
import other.move.Move;
import other.playout.HeuristicMoveSelector;
import other.trial.Trial;
import search.mcts.MCTS;
/**
* Playout strategy that selects actions that lead to successor states that
* maximise a heuristic score from the mover's perspective.
*
* We extend the AI abstract class because this means that the outer MCTS
* will also let us init, which allows us to load heuristics from metadata
* if desired. Also means this thing can play games as a standalone AI.
*
* @author Dennis Soemers
*/
public class HeuristicPlayout extends AI implements PlayoutStrategy
{
//-------------------------------------------------------------------------
/**
* Auto-end playouts in a draw if they take more turns than this. A negative value means
* no limit.
*
* TODO if we have heuristics anyway, might make sense to use them for a non-draw eval..
*/
protected int playoutTurnLimit = -1;
/** Filepath from which we want to load heuristics. Null if we want to load automatically from game's metadata */
protected final String heuristicsFilepath;
/** Heuristic-based PlayoutMoveSelector */
protected HeuristicMoveSelector moveSelector = new HeuristicMoveSelector();
//-------------------------------------------------------------------------
/**
* Default constructor: no cap on the number of turns in playouts, heuristics from game metadata
*/
public HeuristicPlayout()
{
playoutTurnLimit = -1; // No limit
heuristicsFilepath = null;
}
/**
* Constructor
* @param heuristicsFilepath Filepath for file specifying heuristics to use
* @throws IOException
* @throws FileNotFoundException
*/
public HeuristicPlayout(final String heuristicsFilepath) throws FileNotFoundException, IOException
{
this.playoutTurnLimit = -1; // No limit
this.heuristicsFilepath = heuristicsFilepath;
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
return context.game().playout(context, null, 1.0, moveSelector, -1, playoutTurnLimit, ThreadLocalRandom.current());
}
//-------------------------------------------------------------------------
@Override
public boolean playoutSupportsGame(final Game game)
{
if (game.isDeductionPuzzle())
return (playoutTurnLimit() > 0);
else
return true;
}
@Override
public void customise(final String[] inputs)
{
// TODO
}
/**
* @return The turn limit we use in playouts
*/
public int playoutTurnLimit()
{
return playoutTurnLimit;
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void initAI(final Game game, final int playerID)
{
Heuristics heuristicValueFunction;
if (heuristicsFilepath == null)
{
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicValueFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicValueFunction =
new Heuristics
(
new HeuristicTerm[]
{
new Material(null, Float.valueOf(1.f), null, null),
new MobilitySimple(null, Float.valueOf(0.001f))
}
);
}
}
else
{
heuristicValueFunction = moveSelector.heuristicValueFunction();
if (heuristicValueFunction == null)
{
String heuristicsStr;
try
{
heuristicsStr = FileHandling.loadTextContentsFromFile(heuristicsFilepath);
heuristicValueFunction =
(Heuristics)compiler.Compiler.compileObject
(
heuristicsStr,
"metadata.ai.heuristics.Heuristics",
new Report()
);
}
catch (final IOException e)
{
e.printStackTrace();
return;
}
}
}
if (heuristicValueFunction != null)
{
heuristicValueFunction.init(game);
moveSelector.setHeuristics(heuristicValueFunction);
}
}
@Override
public Move selectAction
(
final Game game, final Context context, final double maxSeconds,
final int maxIterations, final int maxDepth
)
{
// TODO Auto-generated method stub
System.err.println("Need to implement HeuristicPlayout::selectAction() to let it play as standalone AI!");
return null;
}
//-------------------------------------------------------------------------
}
| 4,825 | 24.807487 | 117 | java |
Ludii | Ludii-master/AI/src/search/mcts/playout/HeuristicSampingPlayout.java | package search.mcts.playout;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.FileHandling;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilitySimple;
import other.AI;
import other.context.Context;
import other.move.Move;
import other.playout.HeuristicSamplingMoveSelector;
import other.trial.Trial;
import search.mcts.MCTS;
import other.move.MoveScore;
/**
* Playout strategy that selects actions that lead to successor states that
* maximise a heuristic score from the mover's perspective.
*
* We extend the AI abstract class because this means that the outer MCTS
* will also let us init, which allows us to load heuristics from metadata
* if desired. Also means this thing can play games as a standalone AI.
*
* @author Eric.Piette (based on code by Dennis Soemers and Cameron Browne)
*/
public class HeuristicSampingPlayout extends AI implements PlayoutStrategy
{
//-------------------------------------------------------------------------
/**
* Auto-end playouts in a draw if they take more turns than this. A negative value means
* no limit.
*
* TODO if we have heuristics anyway, it might make sense to use them for a non-draw eval.
*/
protected int playoutTurnLimit = -1;
/** Filepath from which we want to load heuristics. Null if we want to load automatically from game's metadata */
protected final String heuristicsFilepath;
/** Heuristic-based PlayoutMoveSelector */
protected HeuristicSamplingMoveSelector moveSelector = new HeuristicSamplingMoveSelector();
/** Score we give to winning opponents in paranoid searches in states where the game is still going (> 2 players) */
private static final float PARANOID_OPP_WIN_SCORE = 10000.f;
/** Score we give to a move that immediately wins the game for the mover */
private static final float WIN_SCORE = 10000.f;
/** We skip computing heuristics with absolute weight value lower than this */
public static final float ABS_HEURISTIC_WEIGHT_THRESHOLD = 0.01f;
/** Denominator of heuristic threshold fraction, i.e. 1/2, 1/4, 1/8, etc. */
private int fraction = 2;
/** Whether to apply same-turn continuation. */
private boolean continuation = true;
/** Our heuristic value function estimator */
private Heuristics heuristicValueFunction = null;
//-------------------------------------------------------------------------
/**
* Default constructor: no cap on actions in playout, heuristics from metadata
*/
public HeuristicSampingPlayout()
{
playoutTurnLimit = -1; // No limit
heuristicsFilepath = null;
}
/**
* Constructor
* @param heuristicsFilepath Filepath for file specifying heuristics to use
* @throws IOException
* @throws FileNotFoundException
*/
public HeuristicSampingPlayout(final String heuristicsFilepath) throws FileNotFoundException, IOException
{
this.playoutTurnLimit = -1; // No limit
this.heuristicsFilepath = heuristicsFilepath;
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
return context.game().playout(context, null, 1.0, moveSelector, -1, playoutTurnLimit, ThreadLocalRandom.current());
}
//-------------------------------------------------------------------------
@Override
public boolean playoutSupportsGame(final Game game)
{
if (game.isDeductionPuzzle())
return (playoutTurnLimit() > 0);
else
return true;
}
@Override
public void customise(final String[] inputs)
{
// Nothing to do here.
}
/**
* @return The turn limit we use in playouts
*/
public int playoutTurnLimit()
{
return playoutTurnLimit;
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void initAI(final Game game, final int playerID)
{
if (heuristicsFilepath == null)
{
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicValueFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicValueFunction =
new Heuristics
(
new HeuristicTerm[]
{
new Material(null, Float.valueOf(1.f), null, null),
new MobilitySimple(null, Float.valueOf(0.001f))
}
);
}
}
else
{
heuristicValueFunction = moveSelector.heuristicValueFunction();
if (heuristicValueFunction == null)
{
String heuristicsStr;
try
{
heuristicsStr = FileHandling.loadTextContentsFromFile(heuristicsFilepath);
heuristicValueFunction =
(Heuristics)compiler.Compiler.compileObject
(
heuristicsStr,
"metadata.ai.heuristics.Heuristics",
new Report()
);
}
catch (final IOException e)
{
e.printStackTrace();
return;
}
}
}
if (heuristicValueFunction != null)
{
heuristicValueFunction.init(game);
moveSelector.setHeuristics(heuristicValueFunction);
}
}
@Override
public Move selectAction
(
final Game game, final Context context, final double maxSeconds,
final int maxIterations, final int maxDepth
)
{
final MoveScore moveScore = evaluateMoves(game, context);
final Move move = moveScore.move();
if (move == null)
System.out.println("** No best move.");
return move;
}
//-------------------------------------------------------------------------
/**
* @param player The player.
* @param context The context.
*
* @return Opponents of given player
*/
@SuppressWarnings("static-method")
public int[] opponents(final int player, final Context context)
{
final int numPlayersInGame = context.game().players().count();
final int[] opponents = new int[numPlayersInGame - 1];
int idx = 0;
if (context.game().requiresTeams())
{
final int tid = context.state().getTeam(player);
for (int p = 1; p <= numPlayersInGame; p++)
if (context.state().getTeam(p) != tid)
opponents[idx++] = p;
}
else
{
for (int p = 1; p <= numPlayersInGame; ++p)
{
if (p != player)
opponents[idx++] = p;
}
}
return opponents;
}
//-------------------------------------------------------------------------
/**
* @param game Current game.
* @param context Current context.
* @param fraction Denominator of the fraction of legal moves to sample, i.e. 2 samples roughly half of them.
* @return Randomly chosen subset of moves.
*/
public static FastArrayList<Move> selectMoves(final Game game, final Context context, final int fraction)
{
FastArrayList<Move> playerMoves = game.moves(context).moves();
FastArrayList<Move> selectedMoves = new FastArrayList<Move>();
final int target = Math.max(2, (playerMoves.size() + 1) / fraction);
if (target >= playerMoves.size())
return playerMoves;
while (selectedMoves.size() < target)
{
final int r = ThreadLocalRandom.current().nextInt(playerMoves.size());
selectedMoves.add(playerMoves.get(r));
playerMoves.remove(r);
}
return selectedMoves;
}
//-------------------------------------------------------------------------
MoveScore evaluateMoves(final Game game, final Context context)
{
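// Evaluate a random subset (roughly 1/fraction, at least two) of the legal moves:
// a move that immediately wins for the mover is returned at once, moves that let
// an opponent win are skipped, and all other moves are scored by the mover's
// heuristic value minus the opponents' heuristic values, plus a tiny random
// tie-breaker. If same-turn continuation is enabled and the mover moves again,
// the evaluation recurses into the resulting position.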
FastArrayList<Move> moves = selectMoves(game, context, fraction);
float bestScore = Float.NEGATIVE_INFINITY;
Move bestMove = moves.get(0);
final int mover = context.state().mover();
//Context contextCurrent = context;
for (Move move: moves)
{
final Context contextCopy = new Context(context);
game.apply(contextCopy, move);
if (contextCopy.trial().status() != null)
{
// Check if move is a winner
final int winner = contextCopy.state().playerToAgent(contextCopy.trial().status().winner());
if (winner == mover)
return new MoveScore(move, WIN_SCORE); // return winning move immediately
if (winner != 0)
continue; // skip losing move
}
float score = 0;
if (continuation && contextCopy.state().mover() == mover)
{
//System.out.println("Recursing...");
return new MoveScore(move, evaluateMoves(game, contextCopy).score());
}
else
{
score = heuristicValueFunction.computeValue
(
contextCopy, mover, ABS_HEURISTIC_WEIGHT_THRESHOLD
);
for (final int opp : opponents(mover, context))
{
if (context.active(opp))
score -= heuristicValueFunction.computeValue(contextCopy, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
else if (context.winners().contains(opp))
score -= PARANOID_OPP_WIN_SCORE;
}
score += (float)(ThreadLocalRandom.current().nextInt(1000) / 1000000.0);
}
if (score > bestScore)
{
bestScore = score;
bestMove = move;
}
}
return new MoveScore(bestMove, bestScore);
}
}
| 8,941 | 26.429448 | 117 | java |
Ludii | Ludii-master/AI/src/search/mcts/playout/MAST.java | package search.mcts.playout;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
import other.trial.Trial;
import playout_move_selectors.EpsilonGreedyWrapper;
import search.mcts.MCTS;
import search.mcts.MCTS.ActionStatistics;
import search.mcts.MCTS.MoveKey;
import search.mcts.backpropagation.BackpropagationStrategy;
/**
* Move-Average Sampling Technique (MAST) playout strategy (epsilon-greedy)
*
* @author Dennis Soemers
*/
public class MAST implements PlayoutStrategy
{
//-------------------------------------------------------------------------
/** Auto-end playouts in a draw if they take more turns than this */
protected int playoutTurnLimit = -1;
/** For epsilon-greedy move selection */
protected double epsilon = 0.1;
/** For every thread, a MAST-based PlayoutMoveSelector */
protected ThreadLocal<MASTMoveSelector> moveSelector = ThreadLocal.withInitial(() -> new MASTMoveSelector());
//-------------------------------------------------------------------------
/**
* Constructor
*/
public MAST()
{
playoutTurnLimit = -1; // no limit
}
/**
* Constructor
* @param playoutTurnLimit
* @param epsilon
*/
public MAST(final int playoutTurnLimit, final double epsilon)
{
this.playoutTurnLimit = playoutTurnLimit;
this.epsilon = epsilon;
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
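// Run a full playout with the thread-local MAST selector wrapped in an
// epsilon-greedy wrapper: with probability epsilon a move is picked uniformly
// at random, otherwise greedily with respect to the global MAST statistics.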
final MASTMoveSelector mast = moveSelector.get();
mast.mcts = mcts;
final Trial trial =
context.game().playout(context, null, 1.0, new EpsilonGreedyWrapper(mast, epsilon), -1, playoutTurnLimit, ThreadLocalRandom.current());
mast.mcts = null;
return trial;
}
@Override
public int backpropFlags()
{
return BackpropagationStrategy.GLOBAL_ACTION_STATS;
}
//-------------------------------------------------------------------------
@Override
public boolean playoutSupportsGame(final Game game)
{
if (game.isDeductionPuzzle())
return (playoutTurnLimit() > 0);
else
return true;
}
@Override
public void customise(final String[] inputs)
{
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.toLowerCase().startsWith("playoutturnlimit="))
{
playoutTurnLimit =
Integer.parseInt
(
input.substring("playoutturnlimit=".length())
);
}
}
}
/**
* @return The turn limit we use in playouts
*/
public int playoutTurnLimit()
{
return playoutTurnLimit;
}
//-------------------------------------------------------------------------
/**
* Playout Move Selector for MAST (NOTE: this one is purely greedy; an
* epsilon-greedy wrapper must be put around it for epsilon-greedy behaviour).
*
* @author Dennis Soemers
*/
protected static class MASTMoveSelector extends PlayoutMoveSelector
{
/** MCTS from which to get our global action stats */
protected MCTS mcts = null;
@Override
public Move selectMove
(
final Context context,
final FastArrayList<Move> maybeLegalMoves,
final int p,
final IsMoveReallyLegal isMoveReallyLegal
)
{
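// Greedy MAST selection: every candidate move is scored by the average return
// accumulated for that move across previous simulations (the global MAST
// statistics); moves without any statistics yet get an optimistic default of 1.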
final FVector actionScores = new FVector(maybeLegalMoves.size());
for (int i = 0; i < maybeLegalMoves.size(); ++i)
{
final ActionStatistics actionStats = mcts.getOrCreateActionStatsEntry(
new MoveKey(maybeLegalMoves.get(i), context.trial().numMoves()));
if (actionStats.visitCount > 0.0)
actionScores.set(i, (float) (actionStats.accumulatedScore / actionStats.visitCount));
else
actionScores.set(i, 1.f);
}
int numLegalMoves = maybeLegalMoves.size();
while (numLegalMoves > 0)
{
--numLegalMoves; // We're trying a move; if this one fails, it's actually not legal
final int n = actionScores.argMaxRand();
final Move move = maybeLegalMoves.get(n);
if (isMoveReallyLegal.checkMove(move))
return move; // Only return this move if it's really legal
else
actionScores.set(n, Float.NEGATIVE_INFINITY); // Illegal action
}
// No legal moves?
return null;
}
}
//-------------------------------------------------------------------------
}
| 4,384 | 24.346821 | 139 | java |
Ludii | Ludii-master/AI/src/search/mcts/playout/NST.java | package search.mcts.playout;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.playout.PlayoutMoveSelector;
import other.trial.Trial;
import playout_move_selectors.EpsilonGreedyWrapper;
import search.mcts.MCTS;
import search.mcts.MCTS.ActionStatistics;
import search.mcts.MCTS.MoveKey;
import search.mcts.MCTS.NGramMoveKey;
import search.mcts.backpropagation.BackpropagationStrategy;
/**
* N-gram Selection Technique playouts
*
* @author Dennis Soemers
*/
public class NST implements PlayoutStrategy
{
//-------------------------------------------------------------------------
/** Auto-end playouts in a draw if they take more turns than this */
protected int playoutTurnLimit = -1;
/** For epsilon-greedy move selection */
protected double epsilon = 0.1;
/** For every thread, an NST-based PlayoutMoveSelector */
protected ThreadLocal<NSTMoveSelector> moveSelector = ThreadLocal.withInitial(() -> new NSTMoveSelector());
//-------------------------------------------------------------------------
/**
* Constructor
*/
public NST()
{
playoutTurnLimit = -1; // no limit
}
/**
* Constructor
* @param playoutTurnLimit
* @param epsilon
*/
public NST(final int playoutTurnLimit, final double epsilon)
{
this.playoutTurnLimit = playoutTurnLimit;
this.epsilon = epsilon;
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
final NSTMoveSelector nst = moveSelector.get();
nst.mcts = mcts;
final Trial trial =
context.game().playout(context, null, 1.0, new EpsilonGreedyWrapper(nst, epsilon), -1, playoutTurnLimit, ThreadLocalRandom.current());
nst.mcts = null;
return trial;
}
@Override
public int backpropFlags()
{
return BackpropagationStrategy.GLOBAL_NGRAM_ACTION_STATS | BackpropagationStrategy.GLOBAL_ACTION_STATS;
}
//-------------------------------------------------------------------------
@Override
public boolean playoutSupportsGame(final Game game)
{
if (game.isDeductionPuzzle())
return (playoutTurnLimit() > 0);
else
return true;
}
@Override
public void customise(final String[] inputs)
{
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.toLowerCase().startsWith("playoutturnlimit="))
{
playoutTurnLimit =
Integer.parseInt
(
input.substring("playoutturnlimit=".length())
);
}
}
}
/**
* @return The turn limit we use in playouts
*/
public int playoutTurnLimit()
{
return playoutTurnLimit;
}
//-------------------------------------------------------------------------
/**
* Playout Move Selector for NST (NOTE: this one is purely greedy; an
* epsilon-greedy wrapper must be put around it for epsilon-greedy behaviour).
*
* @author Dennis Soemers
*/
protected static class NSTMoveSelector extends PlayoutMoveSelector
{
/** MCTS from which to get our global action stats */
protected MCTS mcts = null;
@Override
public Move selectMove
(
final Context context,
final FastArrayList<Move> maybeLegalMoves,
final int p,
final IsMoveReallyLegal isMoveReallyLegal
)
{
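// NST selection: every candidate move is scored by averaging the mean returns
// of all N-grams (from N = 1 up to the max N-gram length) that consist of the
// most recently played moves in the trial followed by the candidate move.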
final FVector actionScores = new FVector(maybeLegalMoves.size());
final int maxNGramLength = Math.min(mcts.maxNGramLength(), context.trial().numberRealMoves() + 1);
for (int i = 0; i < maybeLegalMoves.size(); ++i)
{
int numNGramsConsidered = 0;
float scoresSum = 0.f;
// Start with "N-grams" for N = 1
final ActionStatistics actionStats = mcts.getOrCreateActionStatsEntry(
new MoveKey(maybeLegalMoves.get(i), context.trial().numMoves()));
++numNGramsConsidered;
if (actionStats.visitCount > 0.0)
scoresSum += (float) (actionStats.accumulatedScore / actionStats.visitCount);
else
scoresSum += 1.f;
// Now N-grams for N > 1
final List<Move> reverseActionSequence = new ArrayList<Move>();
reverseActionSequence.add(maybeLegalMoves.get(i));
final Iterator<Move> reverseTrialIterator = context.trial().reverseMoveIterator();
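// Walk backwards through the trial to build progressively longer n-grams that
// end in the candidate move, averaging in their statistics where available.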
for (int n = 2; n <= maxNGramLength; ++n)
{
reverseActionSequence.add(reverseTrialIterator.next());
final Move[] nGram = new Move[n];
for (int j = 0; j < n; ++j)
{
nGram[j] = reverseActionSequence.get(n - j - 1);
}
final ActionStatistics nGramStats =
mcts.getNGramActionStatsEntry
(
new NGramMoveKey
(
nGram,
context.trial().numberRealMoves() - n + 1
)
);
if (nGramStats == null)
break;
++numNGramsConsidered;
scoresSum += (float) (nGramStats.accumulatedScore / nGramStats.visitCount);
}
actionScores.set(i, scoresSum / numNGramsConsidered);
}
int numLegalMoves = maybeLegalMoves.size();
while (numLegalMoves > 0)
{
--numLegalMoves; // We're trying a move; if this one fails, it's actually not legal
final int n = actionScores.argMaxRand();
final Move move = maybeLegalMoves.get(n);
if (isMoveReallyLegal.checkMove(move))
return move; // Only return this move if it's really legal
else
actionScores.set(n, Float.NEGATIVE_INFINITY); // Illegal action
}
// No legal moves?
return null;
}
}
//-------------------------------------------------------------------------
}
| 5,677 | 25.045872 | 138 | java |
Ludii | Ludii-master/AI/src/search/mcts/playout/PlayoutHS.java | package search.mcts.playout;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.FileHandling;
import main.grammar.Report;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilitySimple;
import other.AI;
import other.context.Context;
import other.move.Move;
import other.playout.HeuristicMoveSelector;
import other.trial.Trial;
import search.mcts.MCTS;
/**
* Playout strategy that selects actions that lead to successor states that
* maximise a heuristic score from the mover's perspective.
*
* We extend the AI abstract class because this means that the outer MCTS
* will also let us init, which allows us to load heuristics from metadata
* if desired. Also means this thing can play games as a standalone AI.
*
* @author Dennis Soemers
*/
public class PlayoutHS extends AI implements PlayoutStrategy
{
//-------------------------------------------------------------------------
/**
* Auto-end playouts in a draw if they take more turns than this. A negative value means
* no limit.
*
* TODO if we have heuristics anyway, it might make sense to use them for a non-draw eval.
*/
protected int playoutTurnLimit = -1;
/** Filepath from which we want to load heuristics. Null if we want to load automatically from game's metadata */
protected final String heuristicsFilepath;
/** Heuristic-based PlayoutMoveSelector */
protected HeuristicMoveSelector moveSelector = new HeuristicMoveSelector();
/** Denominator of heuristic threshold fraction, i.e. 1/2, 1/4, 1/8, etc. */
private int fraction = 2;
/** Whether to apply same-turn continuation. */
private boolean continuation = true;
//-------------------------------------------------------------------------
/**
* Default constructor: no cap on actions in playout, heuristics from metadata
*/
public PlayoutHS()
{
playoutTurnLimit = -1; // No limit
heuristicsFilepath = null;
}
/**
* Constructor
* @param heuristicsFilepath Filepath for file specifying heuristics to use
* @throws IOException
* @throws FileNotFoundException
*/
public PlayoutHS(final String heuristicsFilepath) throws FileNotFoundException, IOException
{
this.playoutTurnLimit = -1; // No limit
this.heuristicsFilepath = heuristicsFilepath;
}
//-------------------------------------------------------------------------
public int threshold()
{
return fraction;
}
public void setThreshold(final int value)
{
fraction = value;
friendlyName = "HS (1/" + fraction + ")";
}
public boolean continuation()
{
return continuation;
}
public void setContinuation(final boolean value)
{
continuation = value;
friendlyName = "HS (1/" + fraction + ")" + (continuation ? "*" : "");
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
return context.game().playout(context, null, 1.0, moveSelector, -1, playoutTurnLimit, ThreadLocalRandom.current());
}
//-------------------------------------------------------------------------
@Override
public boolean playoutSupportsGame(final Game game)
{
if (game.isDeductionPuzzle())
return (playoutTurnLimit() > 0);
else
return true;
}
@Override
public void customise(final String[] inputs)
{
// TODO
}
/**
* @return The turn limit we use in playouts
*/
public int playoutTurnLimit()
{
return playoutTurnLimit;
}
@Override
public int backpropFlags()
{
return 0;
}
@Override
public void initAI(final Game game, final int playerID)
{
Heuristics heuristicValueFunction;
if (heuristicsFilepath == null)
{
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicValueFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicValueFunction =
new Heuristics
(
new HeuristicTerm[]
{
new Material(null, Float.valueOf(1.f), null, null),
new MobilitySimple(null, Float.valueOf(0.001f))
}
);
}
}
else
{
heuristicValueFunction = moveSelector.heuristicValueFunction();
if (heuristicValueFunction == null)
{
String heuristicsStr;
try
{
heuristicsStr = FileHandling.loadTextContentsFromFile(heuristicsFilepath);
heuristicValueFunction =
(Heuristics)compiler.Compiler.compileObject
(
heuristicsStr,
"metadata.ai.heuristics.Heuristics",
new Report()
);
}
catch (final IOException e)
{
e.printStackTrace();
return;
}
}
}
if (heuristicValueFunction != null)
{
heuristicValueFunction.init(game);
moveSelector.setHeuristics(heuristicValueFunction);
}
}
@Override
public Move selectAction
(
final Game game, final Context context, final double maxSeconds,
final int maxIterations, final int maxDepth
)
{
// TODO Auto-generated method stub
System.err.println("Need to implement HeuristicPlayout::selectAction() to let it play as standalone AI!");
return null;
}
//-------------------------------------------------------------------------
}
| 5,462 | 24.059633 | 117 | java |
Ludii | Ludii-master/AI/src/search/mcts/playout/PlayoutStrategy.java | package search.mcts.playout;
import java.util.Arrays;
import org.json.JSONObject;
import game.Game;
import other.context.Context;
import other.trial.Trial;
import policies.GreedyPolicy;
import policies.ProportionalPolicyClassificationTree;
import policies.softmax.SoftmaxPolicyLinear;
import policies.softmax.SoftmaxPolicyLogitTree;
import search.mcts.MCTS;
/**
* Interface for Play-out strategies for MCTS
*
* @author Dennis Soemers
*/
public interface PlayoutStrategy
{
//-------------------------------------------------------------------------
/**
* Runs full play-out
*
* @param mcts
* @param context
* @return Trial object at end of playout.
*/
public Trial runPlayout(final MCTS mcts, final Context context);
/**
* Allows a Playout strategy to tell Ludii whether or not it can support playing
* any given game.
*
* @param game
* @return False if the playout strategy cannot be used in a given game
*/
public boolean playoutSupportsGame(final Game game);
/**
* @return Flags indicating stats that should be backpropagated
*/
public int backpropFlags();
//-------------------------------------------------------------------------
/**
* Customise the play-out strategy based on a list of given string inputs.
*
* @param inputs
*/
public void customise(final String[] inputs);
//-------------------------------------------------------------------------
/**
* @param json
* @return Playout strategy constructed from given JSON object
*/
public static PlayoutStrategy fromJson(final JSONObject json)
{
PlayoutStrategy playout = null;
final String strategy = json.getString("strategy");
if (strategy.equalsIgnoreCase("Random"))
{
return new RandomPlayout(200);
}
return playout;
}
//-------------------------------------------------------------------------
/**
* @param inputs
* @return A play-out strategy constructed based on an array of inputs
*/
public static PlayoutStrategy constructPlayoutStrategy(final String[] inputs)
{
PlayoutStrategy playout = null;
if (inputs[0].endsWith("random") || inputs[0].endsWith("randomplayout"))
{
playout = new RandomPlayout();
playout.customise(inputs);
}
else if (inputs[0].endsWith("mast"))
{
playout = new MAST();
playout.customise(inputs);
}
else if (inputs[0].endsWith("nst"))
{
playout = new NST();
playout.customise(inputs);
}
else if (inputs[0].endsWith("softmax") || inputs[0].endsWith("softmaxplayout") || inputs[0].endsWith("softmaxlinear"))
{
playout = new SoftmaxPolicyLinear();
playout.customise(inputs);
}
else if (inputs[0].endsWith("softmaxlogittree"))
{
playout = new SoftmaxPolicyLogitTree();
playout.customise(inputs);
}
else if (inputs[0].endsWith("classificationtreepolicy"))
{
playout = new ProportionalPolicyClassificationTree();
playout.customise(inputs);
}
else if (inputs[0].endsWith("greedy"))
{
playout = new GreedyPolicy();
playout.customise(inputs);
}
else
{
System.err.println("Unknown play-out strategy: " + Arrays.toString(inputs));
}
return playout;
}
//-------------------------------------------------------------------------
}
| 3,234 | 23.323308 | 120 | java |
Ludii | Ludii-master/AI/src/search/mcts/playout/RandomPlayout.java | package search.mcts.playout;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import other.context.Context;
import other.trial.Trial;
import search.mcts.MCTS;
/**
* A completely random Play-out strategy (selects actions according
* to a uniform distribution).
*
* @author Dennis Soemers
*/
public final class RandomPlayout implements PlayoutStrategy
{
//-------------------------------------------------------------------------
/** Auto-end playouts in a draw if they take more turns than this */
protected int playoutTurnLimit = -1;
//-------------------------------------------------------------------------
/**
* Constructor
*/
public RandomPlayout()
{
playoutTurnLimit = -1; // no limit
}
/**
* Constructor
* @param playoutTurnLimit
*/
public RandomPlayout(final int playoutTurnLimit)
{
this.playoutTurnLimit = playoutTurnLimit;
}
//-------------------------------------------------------------------------
@Override
public Trial runPlayout(final MCTS mcts, final Context context)
{
return context.game().playout(context, null, 1.0, null, 0, playoutTurnLimit, ThreadLocalRandom.current());
}
@Override
public int backpropFlags()
{
return 0;
}
//-------------------------------------------------------------------------
@Override
public boolean playoutSupportsGame(final Game game)
{
if (game.isDeductionPuzzle())
return (playoutTurnLimit() > 0);
else
return true;
}
@Override
public void customise(final String[] inputs)
{
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.toLowerCase().startsWith("playoutturnlimit="))
{
playoutTurnLimit =
Integer.parseInt
(
input.substring("playoutturnlimit=".length())
);
}
}
}
/**
* @return The turn limit we use in playouts
*/
public int playoutTurnLimit()
{
return playoutTurnLimit;
}
//-------------------------------------------------------------------------
}
| 2,013 | 19.762887 | 108 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/AG0Selection.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import main.collections.FVector;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Selection strategy used by AlphaGo Zero (described there as a variant of the
* PUCB1 strategy proposed by Rosin (2011), though not exactly the same formula).
*
* @author Dennis Soemers
*
*/
public final class AG0Selection implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** Exploration constant for AlphaGo Zero's selection strategy */
protected double explorationConstant;
//-------------------------------------------------------------------------
/**
* Constructor with default exploration constant of 2.5.
*/
public AG0Selection()
{
this(2.5);
}
/**
* Constructor with custom exploration constant
* @param explorationConstant
*/
public AG0Selection(final double explorationConstant)
{
this.explorationConstant = explorationConstant;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final FVector distribution = current.learnedSelectionPolicy();
final double parentSqrt = Math.sqrt(current.sumLegalChildVisits());
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate =
current.valueEstimateUnvisitedChildren(moverAgent);
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double exploit;
final int numVisits;
if (child == null)
{
exploit = unvisitedValueEstimate;
numVisits = 0;
}
else
{
exploit = child.exploitationScore(moverAgent);
numVisits = child.numVisits() + child.numVirtualVisits();
}
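// PUCT-style value as in AlphaGo Zero: Q(s,a) + C * P(s,a) * sqrt(N(s)) / (1 + N(s,a))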
final float priorProb = distribution.get(i);
final double explore = (parentSqrt == 0.0) ? 1.0 : parentSqrt / (1.0 + numVisits);
final double pucb1Value = exploit + explorationConstant * priorProb * explore;
if (pucb1Value > bestValue)
{
bestValue = pucb1Value;
bestIdx = i;
numBestFound = 1;
}
else if (pucb1Value == bestValue && ThreadLocalRandom.current().nextInt() % ++numBestFound == 0)
{
bestIdx = i;
}
}
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return 0;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
if (inputs.length > 1)
{
// we have more inputs than just the name of the strategy
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.startsWith("explorationconstant="))
{
explorationConstant = Double.parseDouble(input.substring("explorationconstant=".length()));
}
else
{
System.err.println("AG0Selection ignores unknown customisation: " + input);
}
}
}
}
//-------------------------------------------------------------------------
}
| 3,567 | 24.855072 | 105 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/ExItSelection.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import main.collections.FVector;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Selection strategy used by Anthony, Tian, and Barber (2017) for
* Expert Iteration
*
* @author Dennis Soemers
*
*/
public final class ExItSelection implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** The standard exploration constant of UCB1 */
protected double explorationConstant;
/**
* Weight parameter for the prior policy term (w_a in the ExIt paper)
*
* Note: paper mentions a good value for this hyperparameter may be
* close to the average number of simulations per action at the root...
* which will vary wildly per game and per time-control setting.
*/
protected double priorPolicyWeight;
//-------------------------------------------------------------------------
/**
* Constructor
* @param priorPolicyWeight
*/
public ExItSelection(final double priorPolicyWeight)
{
this(Math.sqrt(2.0), priorPolicyWeight);
}
/**
* Constructor
* @param explorationConstant
* @param priorPolicyWeight
*/
public ExItSelection(final double explorationConstant, final double priorPolicyWeight)
{
this.explorationConstant = explorationConstant;
this.priorPolicyWeight = priorPolicyWeight;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final FVector distribution = current.learnedSelectionPolicy();
final double parentLog = Math.log(Math.max(1, current.sumLegalChildVisits()));
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate =
current.valueEstimateUnvisitedChildren(moverAgent);
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double exploit;
final double explore;
final int numVisits;
if (child == null)
{
exploit = unvisitedValueEstimate;
numVisits = 0;
explore = Math.sqrt(parentLog);
}
else
{
exploit = child.exploitationScore(moverAgent);
numVisits = child.numVisits() + child.numVirtualVisits();
explore = Math.sqrt(parentLog / numVisits);
}
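// UCB1 plus a prior-policy bonus (as in Expert Iteration):
// Q(s,a) + C * sqrt(ln(N(s)) / N(s,a)) + w_a * P(s,a) / (N(s,a) + 1)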
final float priorProb = distribution.get(i);
final double priorTerm = priorProb / (numVisits + 1);
final double ucb1pValue =
exploit +
explorationConstant * explore +
priorPolicyWeight * priorTerm;
if (ucb1pValue > bestValue)
{
bestValue = ucb1pValue;
bestIdx = i;
numBestFound = 1;
}
else if
(
ucb1pValue == bestValue
&&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0
)
{
bestIdx = i;
}
}
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return 0;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
if (inputs.length > 1)
{
// we have more inputs than just the name of the strategy
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.startsWith("explorationconstant="))
{
explorationConstant = Double.parseDouble(
input.substring("explorationconstant=".length()));
}
else
{
System.err.println("ExItSelection ignores unknown customisation: " + input);
}
}
}
}
//-------------------------------------------------------------------------
}
| 4,191 | 24.876543 | 87 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/McBRAVE.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import other.move.Move;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.MCTS.MoveKey;
import search.mcts.backpropagation.BackpropagationStrategy;
import search.mcts.nodes.BaseNode;
import search.mcts.nodes.BaseNode.NodeStatistics;
/**
* A Monte-Carlo variant of RAVE in which the AMAF statistics for a move are
* accumulated over the current node and all of its ancestors (rather than a
* single reference node, as in McGRAVE). Pure exploitation, no exploration term.
*
* @author Dennis Soemers
*/
public class McBRAVE implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** Hyperparameter used in computation of weight for AMAF term */
protected final double bias;
//-------------------------------------------------------------------------
/**
* Constructor with default value of bias = 10^(-6),
* loosely based on hyperparameter tuning in GRAVE paper (though
* that paper found many different optimal values for different games).
*/
public McBRAVE()
{
this.bias = 10.0e-6;
}
/**
* Constructor
* @param bias
*/
public McBRAVE(final double bias)
{
this.bias = bias;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate = current.valueEstimateUnvisitedChildren(moverAgent);
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double meanScore;
final double meanAMAF;
final double beta;
if (child == null)
{
meanScore = unvisitedValueEstimate;
meanAMAF = 0.0;
beta = 0.0;
}
else
{
meanScore = child.exploitationScore(moverAgent);
final Move move = child.parentMove();
int accumVisits = 0;
double accumScore = 0.0;
final MoveKey moveKey = new MoveKey(move, current.contextRef().trial().numMoves());
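// Accumulate AMAF statistics for this move over the current node and all of
// its ancestors, rather than using a single reference node as in McGRAVE.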
BaseNode raveNode = current;
while (raveNode != null)
{
final NodeStatistics graveStats = raveNode.graveStats(moveKey);
if (graveStats != null)
{
accumScore += graveStats.accumulatedScore;
accumVisits += graveStats.visitCount;
}
raveNode = raveNode.parent();
}
if (accumVisits == 0)
{
meanAMAF = 0.0;
beta = 0.0;
}
else
{
final int childVisits = child.numVisits() + child.numVirtualVisits();
meanAMAF = accumScore / accumVisits;
beta = accumVisits / (accumVisits + childVisits + bias * accumVisits * childVisits);
}
}
final double graveValue = (1.0 - beta) * meanScore + beta * meanAMAF;
if (graveValue > bestValue)
{
bestValue = graveValue;
bestIdx = i;
numBestFound = 1;
}
else if
(
graveValue == bestValue
&&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0
)
{
bestIdx = i;
}
}
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return BackpropagationStrategy.GRAVE_STATS;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
// TODO
}
//-------------------------------------------------------------------------
}
| 3,919 | 24.290323 | 97 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/McGRAVE.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import other.move.Move;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.MCTS.MoveKey;
import search.mcts.backpropagation.BackpropagationStrategy;
import search.mcts.nodes.BaseNode;
import search.mcts.nodes.BaseNode.NodeStatistics;
/**
* A Monte-Carlo variant of Generalized Rapid Action Value Estimation (GRAVE).
* This is basically exactly the variant described in the Tristan Cazenave's
* IJCAI 2015 paper; with no exploration term, pure exploitation.
*
* Note that Subsection 5.2 of Gelly and Silver's 2011 paper in Artificial Intelligence
* describes that they found MC-RAVE (their MC-variant of RAVE, without exploration) to
* outperform UCT-RAVE (their UCT-variant of RAVE, with exploration).
*
* With ref = 0, this is equivalent to Gelly and Silver's MC-RAVE.
*
* @author Dennis Soemers
*/
public class McGRAVE implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** Threshold number of playouts that a node must have had for its AMAF values to be used */
protected final int ref;
/** Hyperparameter used in computation of weight for AMAF term */
protected final double bias;
/** Reference node in current MCTS simulation (one per thread, in case of multi-threaded MCTS) */
protected ThreadLocal<BaseNode> currentRefNode = ThreadLocal.withInitial(() -> null);
//-------------------------------------------------------------------------
/**
* Constructor with default values of ref = 100 and bias = 10^(-6),
* loosely based on hyperparameter tuning in GRAVE paper (though
* that paper found many different optimal values for different games).
*/
public McGRAVE()
{
this.ref = 100;
this.bias = 10.0e-6;
}
/**
* Constructor
* @param ref
* @param bias
*/
public McGRAVE(final int ref, final double bias)
{
this.ref = ref;
this.bias = bias;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate = current.valueEstimateUnvisitedChildren(moverAgent);
if (currentRefNode.get() == null || current.numVisits() > ref || current.parent() == null)
currentRefNode.set(current);
//System.out.println("selecting for current node = " + current + ". Mover = " + current.contextRef().state().mover());
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double meanScore;
final double meanAMAF;
final double beta;
if (child == null)
{
meanScore = unvisitedValueEstimate;
meanAMAF = 0.0;
beta = 0.0;
}
else
{
meanScore = child.exploitationScore(moverAgent);
final Move move = child.parentMove();
final NodeStatistics graveStats = currentRefNode.get().graveStats(new MoveKey(move, current.contextRef().trial().numMoves()));
// if (graveStats == null)
// {
// System.out.println("currentRefNode = " + currentRefNode.get());
// System.out.println("stats for " + new MoveKey(move, current.contextRef().trial().numMoves()) + " in " + currentRefNode.get() + " = " + graveStats);
// System.out.println("child visits = " + child.numVisits());
// System.out.println("current.who = " + current.contextRef().containerState(0).cloneWho().toChunkString());
// System.out.println("current legal actions = " + Arrays.toString(((Node) current).legalActions()));
// System.out.println("current context legal moves = " + current.contextRef().activeGame().moves(current.contextRef()));
// }
if (graveStats == null)
{
// In single-threaded MCTS this should always be a bug,
// but in multi-threaded MCTS it can happen
meanAMAF = 0.0;
beta = 0.0;
}
else
{
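// GRAVE mixing weight: beta = g / (g + n + bias * g * n), where g is the number
// of AMAF (GRAVE) visits and n the number of direct child visits, so the AMAF
// estimate dominates while the child still has few direct visits.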
final double graveScore = graveStats.accumulatedScore;
final int graveVisits = graveStats.visitCount;
final int childVisits = child.numVisits() + child.numVirtualVisits();
meanAMAF = graveScore / graveVisits;
beta = graveVisits / (graveVisits + childVisits + bias * graveVisits * childVisits);
}
}
final double graveValue = (1.0 - beta) * meanScore + beta * meanAMAF;
if (graveValue > bestValue)
{
bestValue = graveValue;
bestIdx = i;
numBestFound = 1;
}
else if
(
graveValue == bestValue
&&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0
)
{
bestIdx = i;
}
}
// This can help the garbage collector clean up a bit more easily
if (current.childForNthLegalMove(bestIdx) == null)
currentRefNode.set(null);
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return BackpropagationStrategy.GRAVE_STATS;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
// TODO
}
//-------------------------------------------------------------------------
}
| 5,816 | 31.864407 | 160 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/NoisyAG0Selection.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import main.collections.FVector;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* A noisy variant of the AlphaGo Zero selection phase; mixes the prior
* policy with a uniform policy.
*
* @author Dennis Soemers
*
*/
public final class NoisyAG0Selection implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** Exploration constant for AlphaGo Zero's selection strategy */
protected double explorationConstant;
/** Weight to assign to the uniform distribution */
protected double uniformDistWeight;
//-------------------------------------------------------------------------
/**
* Constructor with default exploration constant of 2.5 and weight of 0.25
* for the uniform distribution.
*/
public NoisyAG0Selection()
{
this(2.5, 0.25);
}
/**
* Constructor with custom hyperparameters
* @param explorationConstant
* @param uniformDistWeight
*/
public NoisyAG0Selection(final double explorationConstant, final double uniformDistWeight)
{
this.explorationConstant = explorationConstant;
this.uniformDistWeight = uniformDistWeight;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final int numChildren = current.numLegalMoves();
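// Mix the learned prior with a uniform distribution:
// pi'(a) = (1 - w) * pi(a) + w / numChildren, with w = uniformDistWeight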
final FVector distribution = current.learnedSelectionPolicy().copy();
distribution.mult((float) (1.0 - uniformDistWeight));
final FVector uniformDist = new FVector(numChildren);
uniformDist.fill(0, numChildren, (float)(uniformDistWeight / numChildren));
distribution.add(uniformDist);
final double parentSqrt = Math.sqrt(current.sumLegalChildVisits());
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate =
current.valueEstimateUnvisitedChildren(moverAgent);
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double exploit;
final int numVisits;
if (child == null)
{
exploit = unvisitedValueEstimate;
numVisits = 0;
}
else
{
exploit = child.exploitationScore(moverAgent);
numVisits = child.numVisits() + child.numVirtualVisits();
}
final float priorProb = distribution.get(i);
final double explore = (parentSqrt == 0.0) ? 1.0 : parentSqrt / (1.0 + numVisits);
final double pucb1Value = exploit + explorationConstant * priorProb * explore;
if (pucb1Value > bestValue)
{
bestValue = pucb1Value;
bestIdx = i;
numBestFound = 1;
}
else if (pucb1Value == bestValue && ThreadLocalRandom.current().nextInt() % ++numBestFound == 0)
{
bestIdx = i;
}
}
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return 0;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
if (inputs.length > 1)
{
// we have more inputs than just the name of the strategy
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.startsWith("explorationconstant="))
{
explorationConstant = Double.parseDouble(input.substring("explorationconstant=".length()));
}
else if (input.startsWith("uniformdistweight="))
{
uniformDistWeight = Double.parseDouble(input.substring("uniformdistweight=".length()));
}
else
{
System.err.println("NoisyAG0Selection ignores unknown customisation: " + input);
}
}
}
}
//-------------------------------------------------------------------------
}
| 4,185 | 26.539474 | 105 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/ProgressiveBias.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Progressive Bias, as described in "Progressive Strategies for
* Monte-Carlo Tree Search" by Chaslot et al.
*
* Assumes that a heuristic function has been defined inside the MCTS object.
*
* @author Dennis Soemers
*/
public final class ProgressiveBias implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** Exploration constant */
protected double explorationConstant;
//-------------------------------------------------------------------------
/**
* Constructor with default value sqrt(2.0) for exploration constant
*/
public ProgressiveBias()
{
this(Math.sqrt(2.0));
}
/**
* Constructor with parameter for exploration constant
* @param explorationConstant
*/
public ProgressiveBias(final double explorationConstant)
{
this.explorationConstant = explorationConstant;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
assert(mcts.heuristics() != null);
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final double parentLog = Math.log(Math.max(1, current.sumLegalChildVisits()));
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate =
current.valueEstimateUnvisitedChildren(moverAgent);
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double exploit;
final double explore;
final double heuristicScore;
if (child == null)
{
exploit = unvisitedValueEstimate;
explore = Math.sqrt(parentLog);
heuristicScore = unvisitedValueEstimate;
}
else
{
exploit = child.exploitationScore(moverAgent);
final int numVisits = child.numVisits() + child.numVirtualVisits();
explore = Math.sqrt(parentLog / numVisits);
// No idea what kind of weight we should use, just guessing 10.0 for now based on nothing
heuristicScore = (10.0 * child.heuristicValueEstimates()[moverAgent]) / numVisits;
}
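// UCB1 plus a progressive bias term: the (weighted) heuristic estimate of the
// child divided by its visit count, so its influence fades as real statistics accumulate.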
final double ucb1Value = exploit + explorationConstant * explore + heuristicScore;
if (ucb1Value > bestValue)
{
bestValue = ucb1Value;
bestIdx = i;
numBestFound = 1;
}
else if
(
ucb1Value == bestValue
&&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0
)
{
bestIdx = i;
}
}
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return 0;
}
@Override
public int expansionFlags()
{
return MCTS.HEURISTIC_INIT;
}
@Override
public void customise(final String[] inputs)
{
if (inputs.length > 1)
{
// we have more inputs than just the name of the strategy
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.startsWith("explorationconstant="))
{
explorationConstant = Double.parseDouble(
input.substring("explorationconstant=".length()));
}
else
{
System.err.println("Progressive Bias ignores unknown customisation: " + input);
}
}
}
}
//-------------------------------------------------------------------------
}
| 3,877 | 25.380952 | 99 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/ProgressiveHistory.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import other.move.Move;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.MCTS.ActionStatistics;
import search.mcts.MCTS.MoveKey;
import search.mcts.backpropagation.BackpropagationStrategy;
import search.mcts.nodes.BaseNode;
/**
* Progressive History, as described by Nijssen and Winands (2011)
*
* @author Dennis Soemers
*/
public class ProgressiveHistory implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** The W hyperparameter / weight for Progressive History */
protected final double progressiveBiasInfluence;
/** Exploration constant */
protected double explorationConstant;
//-------------------------------------------------------------------------
/**
* Constructor with default value of W = 3.0,
* loosely based on results from (Nijssen and Winands, 2011).
*/
public ProgressiveHistory()
{
this.progressiveBiasInfluence = 3.0;
this.explorationConstant = Math.sqrt(2.0);
}
/**
* Constructor
* @param progressiveBiasInfluence
* @param explorationConstant
*/
public ProgressiveHistory(final double progressiveBiasInfluence, final double explorationConstant)
{
this.progressiveBiasInfluence = progressiveBiasInfluence;
this.explorationConstant = explorationConstant;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final double parentLog = Math.log(Math.max(1, current.sumLegalChildVisits()));
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate = current.valueEstimateUnvisitedChildren(moverAgent);
//System.out.println("selecting for current node = " + current + ". Mover = " + current.contextRef().state().mover());
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double explore;
final double meanScore;
final double meanGlobalActionScore;
final int childNumVisits = child == null ? 0 : child.numVisits() + child.numVirtualVisits();
final Move move = current.nthLegalMove(i);
final ActionStatistics actionStats = mcts.getOrCreateActionStatsEntry(new MoveKey(move, current.contextRef().trial().numMoves()));
if (actionStats.visitCount == 0)
meanGlobalActionScore = unvisitedValueEstimate;
else
meanGlobalActionScore = actionStats.accumulatedScore / actionStats.visitCount;
if (child == null)
{
meanScore = unvisitedValueEstimate;
explore = Math.sqrt(parentLog);
}
else
{
meanScore = child.exploitationScore(moverAgent);
explore = Math.sqrt(parentLog / childNumVisits);
}
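// UCB1 plus the Progressive History bonus: the move's global (history) score
// weighted by W / ((1 - meanScore) * n + 1), so the bonus decays as the child
// gets visited and as its own mean score improves.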
final double ucb1PhValue = meanScore + explorationConstant * explore
+ meanGlobalActionScore * (progressiveBiasInfluence / ((1.0 - meanScore) * childNumVisits + 1));
if (ucb1PhValue > bestValue)
{
bestValue = ucb1PhValue;
bestIdx = i;
numBestFound = 1;
}
else if
(
ucb1PhValue == bestValue
&&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0
)
{
bestIdx = i;
}
}
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return BackpropagationStrategy.GLOBAL_ACTION_STATS;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
if (inputs.length > 1)
{
// We have more inputs than just the name of the strategy
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.startsWith("explorationconstant="))
{
explorationConstant = Double.parseDouble(input.substring("explorationconstant=".length()));
}
else
{
System.err.println("ProgressiveHistory ignores unknown customisation: " + input);
}
}
}
}
//-------------------------------------------------------------------------
}
| 4,600 | 28.305732 | 139 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/SelectionStrategy.java | package search.mcts.selection;
import org.json.JSONObject;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* Interface for Selection strategies for MCTS
*
* @author Dennis Soemers
*
*/
public interface SelectionStrategy
{
//-------------------------------------------------------------------------
/**
* Should be implemented to select the index of a child of the current
* node to traverse to.
*
* @param mcts
* @param current
* @return Index of child.
*/
public int select(final MCTS mcts, final BaseNode current);
//-------------------------------------------------------------------------
/**
* @return Flags indicating stats that should be backpropagated
*/
public int backpropFlags();
/**
* @return Flags indicating special things we want to do when expanding nodes
*/
public int expansionFlags();
/**
* Customize the selection strategy based on a list of given string inputs
*
* @param inputs
*/
public void customise(final String[] inputs);
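// Example (hypothetical) inputs array: { "ucb1", "explorationconstant=0.7" };
// by convention inputs[0] is the strategy name and later entries are "key=value"
// settings, as parsed by implementations such as UCB1.customise().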
//-------------------------------------------------------------------------
/**
* @param json
* @return Selection strategy constructed from given JSON object
*/
public static SelectionStrategy fromJson(final JSONObject json)
{
SelectionStrategy selection = null;
final String strategy = json.getString("strategy");
if (strategy.equalsIgnoreCase("UCB1"))
{
return new UCB1();
}
return selection;
}
//-------------------------------------------------------------------------
}
| 1,544 | 21.071429 | 78 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/UCB1.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* UCB1 Selection Strategy, as commonly used in UCT.
*
* @author Dennis Soemers
*/
public final class UCB1 implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** Exploration constant */
protected double explorationConstant;
//-------------------------------------------------------------------------
/**
* Constructor with default value sqrt(2.0) for exploration constant
*/
public UCB1()
{
this(Math.sqrt(2.0));
}
/**
* Constructor with parameter for exploration constant
* @param explorationConstant
*/
public UCB1(final double explorationConstant)
{
this.explorationConstant = explorationConstant;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final double parentLog = Math.log(Math.max(1, current.sumLegalChildVisits()));
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate = current.valueEstimateUnvisitedChildren(moverAgent);
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double exploit;
final double explore;
if (child == null)
{
exploit = unvisitedValueEstimate;
explore = Math.sqrt(parentLog);
}
else
{
exploit = child.exploitationScore(moverAgent);
final int numVisits = child.numVisits() + child.numVirtualVisits();
explore = Math.sqrt(parentLog / numVisits);
}
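// Standard UCB1: exploit + C * sqrt(parentLog / numVisits), where parentLog is
// the natural log of the summed child visit counts.
// Worked example with hypothetical numbers: exploit = 0.4, summed child visits
// of 100 (parentLog = approximately 4.61), 10 visits for this child and
// C = sqrt(2) give explore = sqrt(4.61 / 10) = approximately 0.68, and a UCB1
// value of roughly 0.4 + 1.414 * 0.68 = 1.36.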
final double ucb1Value = exploit + explorationConstant * explore;
//System.out.println("ucb1Value = " + ucb1Value);
//System.out.println("exploit = " + exploit);
//System.out.println("explore = " + explore);
if (ucb1Value > bestValue)
{
bestValue = ucb1Value;
bestIdx = i;
numBestFound = 1;
}
else if
(
ucb1Value == bestValue
&&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0
)
{
bestIdx = i;
}
}
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return 0;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
if (inputs.length > 1)
{
// We have more inputs than just the name of the strategy
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.startsWith("explorationconstant="))
{
explorationConstant = Double.parseDouble(
input.substring("explorationconstant=".length()));
}
else
{
System.err.println("UCB1 ignores unknown customisation: " + input);
}
}
}
}
//-------------------------------------------------------------------------
}
| 3,473 | 23.992806 | 97 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/UCB1GRAVE.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import other.move.Move;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.MCTS.MoveKey;
import search.mcts.backpropagation.BackpropagationStrategy;
import search.mcts.nodes.BaseNode;
import search.mcts.nodes.BaseNode.NodeStatistics;
/**
* A UCB1 variant of Generalized Rapid Action Value Estimation (GRAVE).
* This variant differs from MC-GRAVE in that it also uses a UCB1-style
* exploration term.
*
* Note that Subsection 5.2 of Gelly and Silver's 2011 paper in Artificial Intelligence
* describes that they found MC-RAVE (their MC-variant of RAVE, without exploration) to
* outperform UCT-RAVE (their UCT-variant of RAVE, with exploration).
*
* With ref = 0, this is equivalent to Gelly and Silver's UCT-RAVE.
*
* @author Dennis Soemers
*/
public class UCB1GRAVE implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** Threshold number of playouts that a node must have had for its AMAF values to be used */
protected final int ref;
/** Hyperparameter used in computation of weight for AMAF term */
protected final double bias;
/** Exploration constant */
protected double explorationConstant;
/** Reference node in current MCTS simulation (one per thread, in case of multi-threaded MCTS) */
protected ThreadLocal<BaseNode> currentRefNode = ThreadLocal.withInitial(() -> null);
//-------------------------------------------------------------------------
/**
* Constructor with default values of ref = 100 and bias = 10e-6,
* loosely based on hyperparameter tuning in GRAVE paper (though
* that paper found many different optimal values for different games).
*/
public UCB1GRAVE()
{
this.ref = 100;
this.bias = 10.0e-6;
this.explorationConstant = Math.sqrt(2.0);
}
/**
* Constructor
* @param ref
* @param bias
* @param explorationConstant
*/
public UCB1GRAVE(final int ref, final double bias, final double explorationConstant)
{
this.ref = ref;
this.bias = bias;
this.explorationConstant = explorationConstant;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final double parentLog = Math.log(Math.max(1, current.sumLegalChildVisits()));
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate = current.valueEstimateUnvisitedChildren(moverAgent);
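// currentRefNode supplies the AMAF (GRAVE) statistics used below: it is moved
// down to the current node whenever that node has more than "ref" visits (and
// always at the root), so sparsely-visited descendants keep using the AMAF
// statistics of their deepest ancestor that exceeded the ref threshold.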
if (currentRefNode.get() == null || current.numVisits() > ref || current.parent() == null)
currentRefNode.set(current);
//System.out.println("selecting for current node = " + current + ". Mover = " + current.contextRef().state().mover());
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double explore;
final double meanScore;
final double meanAMAF;
final double beta;
if (child == null)
{
meanScore = unvisitedValueEstimate;
meanAMAF = 0.0;
beta = 0.0;
explore = Math.sqrt(parentLog);
}
else
{
meanScore = child.exploitationScore(moverAgent);
final Move move = child.parentMove();
final NodeStatistics graveStats = currentRefNode.get().graveStats(new MoveKey(move, current.contextRef().trial().numMoves()));
// if (graveStats == null)
// {
// System.out.println("currentRefNode = " + currentRefNode.get());
// System.out.println("stats for " + new MoveKey(move) + " in " + currentRefNode.get() + " = " + graveStats);
// System.out.println("child visits = " + child.numVisits());
// System.out.println("current.who = " + current.contextRef().containerState(0).cloneWho().toChunkString());
// System.out.println("current legal actions = " + Arrays.toString(((Node) current).legalActions()));
// System.out.println("current context legal moves = " + current.contextRef().activeGame().moves(current.contextRef()));
// }
final double graveScore = graveStats.accumulatedScore;
final int graveVisits = graveStats.visitCount;
final int childVisits = child.numVisits() + child.numVirtualVisits();
meanAMAF = graveScore / graveVisits;
beta = graveVisits / (graveVisits + childVisits + bias * graveVisits * childVisits);
explore = Math.sqrt(parentLog / childVisits);
}
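// GRAVE blends the child's own mean score with the AMAF mean using weight
// beta = g / (g + n + bias * g * n), where g = AMAF visits and n = child visits,
// so the AMAF estimate dominates early and fades as n grows.
// E.g. (hypothetical) g = 50, n = 5, bias = 1e-5: beta = 50 / 55.0025, i.e.
// approximately 0.91.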
final double graveValue = (1.0 - beta) * meanScore + beta * meanAMAF;
final double ucb1GraveValue = graveValue + explorationConstant * explore;
if (ucb1GraveValue > bestValue)
{
bestValue = ucb1GraveValue;
bestIdx = i;
numBestFound = 1;
}
else if
(
ucb1GraveValue == bestValue
&&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0
)
{
bestIdx = i;
}
}
// This can help the garbage collector to clean up a bit more easily
if (current.childForNthLegalMove(bestIdx) == null)
currentRefNode.set(null);
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return BackpropagationStrategy.GRAVE_STATS;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
if (inputs.length > 1)
{
// We have more inputs than just the name of the strategy
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.startsWith("explorationconstant="))
{
explorationConstant = Double.parseDouble(input.substring("explorationconstant=".length()));
}
else
{
System.err.println("UCB1GRAVE ignores unknown customisation: " + input);
}
}
}
}
//-------------------------------------------------------------------------
}
| 6,417 | 32.082474 | 136 | java |
Ludii | Ludii-master/AI/src/search/mcts/selection/UCB1Tuned.java | package search.mcts.selection;
import java.util.concurrent.ThreadLocalRandom;
import other.state.State;
import search.mcts.MCTS;
import search.mcts.nodes.BaseNode;
/**
* UCB1-Tuned Selection strategy. The original paper by Auer et al. used 1/4 as the
* upper bound on the variance of a Bernoulli random variable. In our MCTS we
* expect values in the [-1, 1] range rather than the [0, 1] range, so
* we use 1 as an upper bound on the variance of this random variable.
*
* @author Dennis Soemers
*/
public final class UCB1Tuned implements SelectionStrategy
{
//-------------------------------------------------------------------------
/** Upper bound on variance of random variable in [-1, 1] range */
protected static final double VARIANCE_UPPER_BOUND = 1.0;
/** Exploration constant */
protected double explorationConstant;
//-------------------------------------------------------------------------
/**
* Constructor with default value sqrt(2.0) for exploration constant
*/
public UCB1Tuned()
{
this(Math.sqrt(2.0));
}
/**
* Constructor with parameter for exploration constant
* @param explorationConstant
*/
public UCB1Tuned(final double explorationConstant)
{
this.explorationConstant = explorationConstant;
}
//-------------------------------------------------------------------------
@Override
public int select(final MCTS mcts, final BaseNode current)
{
int bestIdx = -1;
double bestValue = Double.NEGATIVE_INFINITY;
int numBestFound = 0;
final double parentLog = Math.log(Math.max(1, current.sumLegalChildVisits()));
final int numChildren = current.numLegalMoves();
final State state = current.contextRef().state();
final int moverAgent = state.playerToAgent(state.mover());
final double unvisitedValueEstimate = current.valueEstimateUnvisitedChildren(moverAgent);
for (int i = 0; i < numChildren; ++i)
{
final BaseNode child = current.childForNthLegalMove(i);
final double exploit;
final double sampleVariance;
final double visitsFraction;
if (child == null)
{
exploit = unvisitedValueEstimate;
sampleVariance = VARIANCE_UPPER_BOUND;
visitsFraction = parentLog;
}
else
{
exploit = child.exploitationScore(moverAgent);
final int numChildVisits = child.numVisits() + child.numVirtualVisits();
sampleVariance = Math.max(child.sumSquaredScores(moverAgent) / numChildVisits - exploit*exploit, 0.0);
visitsFraction = parentLog / numChildVisits;
}
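// UCB1-Tuned as implemented here:
// exploit + sqrt(visitsFraction * min(Vmax, sampleVariance + C * sqrt(visitsFraction))),
// with visitsFraction = parentLog / numChildVisits. The usual exploration term is
// scaled down when the observed sample variance of this child's returns is low,
// and capped by Vmax = 1.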
final double ucb1TunedValue = exploit +
Math.sqrt
(
visitsFraction * Math.min(VARIANCE_UPPER_BOUND, sampleVariance + explorationConstant * Math.sqrt(visitsFraction))
);
if (ucb1TunedValue > bestValue)
{
bestValue = ucb1TunedValue;
bestIdx = i;
numBestFound = 1;
}
else if
(
ucb1TunedValue == bestValue
&&
ThreadLocalRandom.current().nextInt() % ++numBestFound == 0
)
{
bestIdx = i;
}
}
return bestIdx;
}
//-------------------------------------------------------------------------
@Override
public int backpropFlags()
{
return 0;
}
@Override
public int expansionFlags()
{
return 0;
}
@Override
public void customise(final String[] inputs)
{
if (inputs.length > 1)
{
// We have more inputs than just the name of the strategy
for (int i = 1; i < inputs.length; ++i)
{
final String input = inputs[i];
if (input.startsWith("explorationconstant="))
{
explorationConstant = Double.parseDouble(input.substring("explorationconstant=".length()));
}
else
{
System.err.println("UCB1Tuned ignores unknown customisation: " + input);
}
}
}
}
//-------------------------------------------------------------------------
}
| 4,066 | 26.47973 | 125 | java |
Ludii | Ludii-master/AI/src/search/minimax/AlphaBetaSearch.java | package search.minimax;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.FileHandling;
import main.collections.FVector;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilitySimple;
import other.RankUtils;
import other.context.Context;
import other.move.Move;
import other.state.State;
import other.trial.Trial;
import training.expert_iteration.ExItExperience;
import training.expert_iteration.ExItExperience.ExItExperienceState;
import training.expert_iteration.ExpertPolicy;
import utils.data_structures.transposition_table.TranspositionTable;
import utils.data_structures.transposition_table.TranspositionTable.ABTTData;
/**
* Implementation of alpha-beta search. Assumes perfect-information games.
* Uses iterative deepening when time-restricted, goes straight for
* depth limit when only depth-limited. Extracts heuristics to use from game's metadata.
*
* For games with > 2 players, we use Paranoid search (i.e. all other players
* just try to minimise the score for the maximising player).
*
* @author Dennis Soemers
*/
public class AlphaBetaSearch extends ExpertPolicy
{
//-------------------------------------------------------------------------
/**
* Controls whether searches can search to any depth, or only odd
* or only even depths.
*
* @author Dennis Soemers
*/
public static enum AllowedSearchDepths
{
/** Allow any search depth */
Any,
/** Allow only even search depths */
Even,
/** Allow only odd search depths */
Odd
}
//-------------------------------------------------------------------------
/** Value we use to initialise alpha ("negative infinity", but not really) */
public static final float ALPHA_INIT = -1000000.f;
/** Value we use to initialise beta ("positive infinity", but not really) */
public static final float BETA_INIT = -ALPHA_INIT;
/** Score we give to winning opponents in paranoid searches in states where game is still going (> 2 players) */
public static final float PARANOID_OPP_WIN_SCORE = 10000.f;
/** We skip computing heuristics with absolute weight value lower than this */
public static final float ABS_HEURISTIC_WEIGHT_THRESHOLD = 0.001f;
//-------------------------------------------------------------------------
/** Our heuristic value function estimator */
protected Heuristics heuristicValueFunction = null;
/** If true, we read our heuristic function to use from game's metadata */
protected final boolean heuristicsFromMetadata;
/** We'll automatically return our move after at most this number of seconds if we only have one move */
protected double autoPlaySeconds = 0.0;
/** Estimated score of the root node based on last-run search */
protected float estimatedRootScore = 0.f;
/** The maximum heuristic eval we have ever observed */
protected float maxHeuristicEval = 0.f;
/** The minimum heuristic eval we have ever observed */
protected float minHeuristicEval = 0.f;
/** String to print to Analysis tab of the Ludii app */
protected String analysisReport = null;
/** Current list of moves available in root */
protected FastArrayList<Move> currentRootMoves = null;
/** The last move we returned. Need to memorise this for Expert Iteration with AlphaBeta */
protected Move lastReturnedMove = null;
/** Root context for which we've last performed a search */
protected Context lastSearchedRootContext = null;
/** Value estimates of moves available in root */
protected FVector rootValueEstimates = null;
/** The number of players in the game we're currently playing */
protected int numPlayersInGame = 0;
/** Remember if we proved a win in one of our searches */
protected boolean provedWin = false;
/** Needed for visualisations */
protected float rootAlphaInit = ALPHA_INIT;
/** Needed for visualisations */
protected float rootBetaInit = BETA_INIT;
/** Sorted (hopefully cleverly) list of moves available in root node */
protected FastArrayList<Move> sortedRootMoves = null;
/** If true at end of a search, it means we searched full tree (probably proved a draw) */
protected boolean searchedFullTree = false;
/** Do we want to allow using Transposition Table? */
protected boolean allowTranspositionTable = true;
/** Transposition Table */
protected TranspositionTable transpositionTable = null;
/** Do we allow any search depth, or only odd, or only even? */
protected AllowedSearchDepths allowedSearchDepths = AllowedSearchDepths.Any;
//-------------------------------------------------------------------------
/**
* Creates a standard alpha-beta searcher.
* @return Alpha-beta search algorithm.
*/
public static AlphaBetaSearch createAlphaBeta()
{
return new AlphaBetaSearch();
}
//-------------------------------------------------------------------------
/**
* Constructor
*/
public AlphaBetaSearch()
{
friendlyName = "Alpha-Beta";
heuristicsFromMetadata = true;
}
/**
* Constructor
* @param allowTranspositionTable
*/
public AlphaBetaSearch(final boolean allowTranspositionTable)
{
friendlyName = "Alpha-Beta";
heuristicsFromMetadata = true;
this.allowTranspositionTable = allowTranspositionTable;
}
/**
* Constructor
* @param heuristicsFilepath
* @throws IOException
* @throws FileNotFoundException
*/
public AlphaBetaSearch(final String heuristicsFilepath) throws FileNotFoundException, IOException
{
friendlyName = "Alpha-Beta";
final String heuristicsStr = FileHandling.loadTextContentsFromFile(heuristicsFilepath);
heuristicValueFunction = (Heuristics)compiler.Compiler.compileObject
(
heuristicsStr,
"metadata.ai.heuristics.Heuristics",
new Report()
);
heuristicsFromMetadata = false;
}
/**
* Constructor
* @param heuristics
*/
public AlphaBetaSearch(final Heuristics heuristics)
{
friendlyName = "Alpha-Beta";
heuristicValueFunction = heuristics;
heuristicsFromMetadata = false;
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
provedWin = false;
final int depthLimit = maxDepth > 0 ? maxDepth : Integer.MAX_VALUE;
lastSearchedRootContext = context;
if (transpositionTable != null)
transpositionTable.allocate();
final int initDepth = allowedSearchDepths == AllowedSearchDepths.Even ? 2 : 1;
if (maxSeconds > 0)
{
final long startTime = System.currentTimeMillis();
final long stopTime = startTime + (long) (maxSeconds * 1000);
// First do normal iterative deepening alphabeta (paranoid if > 2 players)
lastReturnedMove = iterativeDeepening(game, context, maxSeconds, depthLimit, initDepth);
final long currentTime = System.currentTimeMillis();
if (game.players().count() > 2 && currentTime < stopTime)
{
// We still have time left in game with > 2 players;
// this probably means that paranoid search proved a win or a loss
// If a win for us was proven even under paranoid assumption, just play it!
if (provedWin)
{
if (transpositionTable != null)
transpositionTable.deallocate();
return lastReturnedMove;
}
// Otherwise, we assume a loss was proven under paranoid assumption.
// This can lead to poor play in end-games (or extremely simple games) due
// to unrealistic paranoid assumption, so now we switch to Max^N and run again
lastReturnedMove = iterativeDeepeningMaxN(game, context, (stopTime - currentTime) / 1000.0, depthLimit, initDepth);
}
if (transpositionTable != null)
transpositionTable.deallocate();
return lastReturnedMove;
}
else
{
// We'll just do iterative deepening with the depth limit as starting depth
lastReturnedMove = iterativeDeepening(game, context, maxSeconds, depthLimit, depthLimit);
if (transpositionTable != null)
transpositionTable.deallocate();
return lastReturnedMove;
}
}
//-------------------------------------------------------------------------
/**
* Runs iterative deepening alpha-beta
* @param game
* @param context
* @param maxSeconds
* @param maxDepth
* @param startDepth
* @return Move to play
*/
public Move iterativeDeepening
(
final Game game,
final Context context,
final double maxSeconds,
final int maxDepth,
final int startDepth
)
{
final long startTime = System.currentTimeMillis();
long stopTime = (maxSeconds > 0.0) ? startTime + (long) (maxSeconds * 1000) : Long.MAX_VALUE;
final int numPlayers = game.players().count();
currentRootMoves = new FastArrayList<Move>(game.moves(context).moves());
// Create a shuffled version of list of moves (random tie-breaking)
final FastArrayList<Move> tempMovesList = new FastArrayList<Move>(currentRootMoves);
sortedRootMoves = new FastArrayList<Move>(currentRootMoves.size());
while (!tempMovesList.isEmpty())
{
sortedRootMoves.add(tempMovesList.removeSwap(ThreadLocalRandom.current().nextInt(tempMovesList.size())));
}
final int numRootMoves = sortedRootMoves.size();
final List<ScoredMove> scoredMoves = new ArrayList<ScoredMove>(sortedRootMoves.size());
if (numRootMoves == 1)
{
// play faster if we only have one move available anyway
if (autoPlaySeconds >= 0.0 && autoPlaySeconds < maxSeconds)
stopTime = startTime + (long) (autoPlaySeconds * 1000);
}
// Vector for visualisation purposes
rootValueEstimates = new FVector(currentRootMoves.size());
// Storing scores found for purpose of move ordering
final FVector moveScores = new FVector(numRootMoves);
final int searchDepthIncrement = allowedSearchDepths == AllowedSearchDepths.Any ? 1 : 2;
int searchDepth = startDepth - searchDepthIncrement;
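// Initialised one increment below startDepth because the loop below adds the
// increment before each search, so the first completed search uses startDepth.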
final int maximisingPlayer = context.state().playerToAgent(context.state().mover());
// Best move found so far during a fully-completed search
// (ignoring incomplete early-terminated search)
Move bestMoveCompleteSearch = sortedRootMoves.get(0);
if (numPlayers > 2)
{
// For paranoid search, we can narrow alpha-beta window if some players already won/lost
rootAlphaInit = ((float) RankUtils.rankToUtil(context.computeNextLossRank(), numPlayers)) * BETA_INIT;
rootBetaInit = ((float) RankUtils.rankToUtil(context.computeNextWinRank(), numPlayers)) * BETA_INIT;
}
else
{
rootAlphaInit = ALPHA_INIT;
rootBetaInit = BETA_INIT;
}
while (searchDepth < maxDepth)
{
searchDepth += searchDepthIncrement;
searchedFullTree = true;
//System.out.println("SEARCHING TO DEPTH: " + searchDepth);
// the real alpha-beta stuff starts here
float score = rootAlphaInit;
float alpha = rootAlphaInit;
final float beta = rootBetaInit;
// best move during this particular search
Move bestMove = sortedRootMoves.get(0);
for (int i = 0; i < numRootMoves; ++i)
{
final Context copyContext = copyContext(context);
final Move m = sortedRootMoves.get(i);
game.apply(copyContext, m);
final float value = alphaBeta(copyContext, searchDepth - 1, alpha, beta, maximisingPlayer, stopTime);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
bestMove = null;
break;
}
final int origMoveIdx = currentRootMoves.indexOf(m);
if (origMoveIdx >= 0)
{
rootValueEstimates.set(origMoveIdx, (float) scoreToValueEst(value, rootAlphaInit, rootBetaInit));
}
moveScores.set(i, value);
if (value > score) // new best move found
{
//System.out.println("New best move: " + m + " with eval = " + value);
score = value;
bestMove = m;
}
if (score > alpha) // new lower bound
alpha = score;
if (alpha >= beta) // beta cut-off
break;
}
// alpha-beta is over, this is iterative deepening stuff again
if (bestMove != null) // search was not interrupted
{
estimatedRootScore = score;
if (score == rootBetaInit)
{
// we've just proven a win, so we can return best move
// found during this search
analysisReport = friendlyName + " (player " + maximisingPlayer + ") found a proven win at depth " + searchDepth + ".";
provedWin = true;
return bestMove;
}
else if (score == rootAlphaInit)
{
// we've just proven a loss, so we return the best move
// of the PREVIOUS search (delays loss for the longest
// amount of time)
analysisReport = friendlyName + " (player " + maximisingPlayer + ") found a proven loss at depth " + searchDepth + ".";
return bestMoveCompleteSearch;
}
else if (searchedFullTree)
{
// We've searched full tree but did not prove a win or loss
// probably means a draw, play best line we have
analysisReport = friendlyName + " (player " + maximisingPlayer + ") completed search of depth " + searchDepth + " (no proven win or loss).";
return bestMove;
}
bestMoveCompleteSearch = bestMove;
}
else
{
// decrement because we didn't manage to complete this search
searchDepth -= searchDepthIncrement;
}
if (System.currentTimeMillis() >= stopTime || wantsInterrupt)
{
// we need to return
analysisReport = friendlyName + " (player " + maximisingPlayer + ") completed search of depth " + searchDepth + ".";
return bestMoveCompleteSearch;
}
// order moves based on scores found, for next search
scoredMoves.clear();
for (int i = 0; i < numRootMoves; ++i)
{
scoredMoves.add(new ScoredMove(sortedRootMoves.get(i), moveScores.get(i)));
}
Collections.sort(scoredMoves);
sortedRootMoves.clear();
for (int i = 0; i < numRootMoves; ++i)
{
sortedRootMoves.add(scoredMoves.get(i).move);
}
// clear the vector of scores
moveScores.fill(0, numRootMoves, 0.f);
}
analysisReport = friendlyName + " (player " + maximisingPlayer + ") completed search of depth " + searchDepth + ".";
return bestMoveCompleteSearch;
}
/**
* Recursive alpha-beta search function.
*
* @param context
* @param depth
* @param inAlpha
* @param inBeta
* @param maximisingPlayer Who is the maximising player?
* @param stopTime
* @return (heuristic) evaluation of the reached state, from perspective of maximising player.
*/
public float alphaBeta
(
final Context context,
final int depth,
final float inAlpha,
final float inBeta,
final int maximisingPlayer,
final long stopTime
)
{
final Trial trial = context.trial();
final State state = context.state();
final float originalAlpha = inAlpha;
float alpha = inAlpha;
float beta = inBeta;
final long zobrist = state.fullHash(context);
final ABTTData tableData;
if (transpositionTable != null)
{
tableData = transpositionTable.retrieve(zobrist);
if (tableData != null)
{
if (tableData.depth >= depth)
{
// Already searched deep enough for data in TT, use results
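// The stored value is either exact, a lower bound (the earlier search failed
// high, i.e. was cut off by beta), or an upper bound (it failed low); bounds
// can only tighten the current alpha-beta window, never widen it.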
switch (tableData.valueType)
{
case TranspositionTable.EXACT_VALUE:
return tableData.value;
case TranspositionTable.LOWER_BOUND:
alpha = Math.max(alpha, tableData.value);
break;
case TranspositionTable.UPPER_BOUND:
beta = Math.min(beta, tableData.value);
break;
default:
System.err.println("INVALID TRANSPOSITION TABLE DATA!");
break;
}
if (alpha >= beta)
return tableData.value;
}
}
}
else
{
tableData = null;
}
if (trial.over() || !context.active(maximisingPlayer))
{
// terminal node (at least for maximising player)
return (float) RankUtils.agentUtilities(context)[maximisingPlayer] * BETA_INIT;
}
else if (depth == 0)
{
searchedFullTree = false;
// heuristic evaluation
float heuristicScore = heuristicValueFunction().computeValue(
context, maximisingPlayer, ABS_HEURISTIC_WEIGHT_THRESHOLD);
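// Paranoid evaluation: subtract each active opponent's heuristic score, and
// heavily penalise opponents that have already secured a win.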
for (final int opp : opponents(maximisingPlayer))
{
if (context.active(opp))
heuristicScore -= heuristicValueFunction().computeValue(context, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
else if (context.winners().contains(opp))
heuristicScore -= PARANOID_OPP_WIN_SCORE;
}
// Invert scores if players swapped
if (state.playerToAgent(maximisingPlayer) != maximisingPlayer)
heuristicScore = -heuristicScore;
minHeuristicEval = Math.min(minHeuristicEval, heuristicScore);
maxHeuristicEval = Math.max(maxHeuristicEval, heuristicScore);
return heuristicScore;
}
final Game game = context.game();
final int mover = state.playerToAgent(state.mover());
FastArrayList<Move> legalMoves = game.moves(context).moves();
final int numLegalMoves = legalMoves.size();
if (tableData != null)
{
// Put best move according to Transposition Table first
final Move transpositionBestMove = tableData.bestMove;
legalMoves = new FastArrayList<Move>(legalMoves); // Copy to avoid modifying original
for (int i = 0; i < numLegalMoves; ++i)
{
if (transpositionBestMove.equals(legalMoves.get(i)))
{
final Move temp = legalMoves.get(0);
legalMoves.set(0, legalMoves.get(i));
legalMoves.set(i, temp);
break;
}
}
}
final int numPlayers = game.players().count();
if (numPlayers > 2)
{
// For paranoid search, we can maybe narrow alpha-beta window if some players already won/lost
alpha = Math.max(alpha, ((float) RankUtils.rankToUtil(context.computeNextLossRank(), numPlayers)) * BETA_INIT);
beta = Math.min(beta, ((float) RankUtils.rankToUtil(context.computeNextWinRank(), numPlayers)) * BETA_INIT);
}
Move bestMove = legalMoves.get(0);
if (mover == maximisingPlayer)
{
float score = ALPHA_INIT;
for (int i = 0; i < numLegalMoves; ++i)
{
final Context copyContext = copyContext(context);
final Move m = legalMoves.get(i);
game.apply(copyContext, m);
final float value = alphaBeta(copyContext, depth - 1, alpha, beta, maximisingPlayer, stopTime);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
return 0;
}
if (value > score)
{
bestMove = m;
score = value;
}
if (score > alpha)
alpha = score;
if (alpha >= beta) // beta cut-off
break;
}
if (transpositionTable != null)
{
// Store data in transposition table
if (score <= originalAlpha) // Found upper bound
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.UPPER_BOUND);
else if (score >= beta) // Found lower bound
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.LOWER_BOUND);
else // Found exact value
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.EXACT_VALUE);
}
return score;
}
else
{
float score = BETA_INIT;
for (int i = 0; i < numLegalMoves; ++i)
{
final Context copyContext = copyContext(context);
final Move m = legalMoves.get(i);
game.apply(copyContext, m);
final float value = alphaBeta(copyContext, depth - 1, alpha, beta, maximisingPlayer, stopTime);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
return 0;
}
if (value < score)
{
bestMove = m;
score = value;
}
if (score < beta)
beta = score;
if (alpha >= beta) // alpha cut-off
break;
}
if (transpositionTable != null)
{
// Store data in transposition table
if (score <= originalAlpha) // Found upper bound
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.UPPER_BOUND);
else if (score >= beta) // Found lower bound
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.LOWER_BOUND);
else // Found exact value
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.EXACT_VALUE);
}
return score;
}
}
//-------------------------------------------------------------------------
/**
* Runs iterative deepening Max^N
* @param game
* @param context
* @param maxSeconds
* @param maxDepth
* @param startDepth
* @return Move to play
*/
public Move iterativeDeepeningMaxN
(
final Game game,
final Context context,
final double maxSeconds,
final int maxDepth,
final int startDepth
)
{
final long startTime = System.currentTimeMillis();
long stopTime = (maxSeconds > 0.0) ? startTime + (long) (maxSeconds * 1000) : Long.MAX_VALUE;
// No need to initialise list of root moves, we re-use the ones from previous paranoid search
final int numRootMoves = sortedRootMoves.size();
final List<ScoredMove> scoredMoves = new ArrayList<ScoredMove>(sortedRootMoves.size());
if (numRootMoves == 1)
{
// play faster if we only have one move available anyway
if (autoPlaySeconds >= 0.0 && autoPlaySeconds < maxSeconds)
stopTime = startTime + (long) (autoPlaySeconds * 1000);
}
// Vector for visualisation purposes
rootValueEstimates = new FVector(currentRootMoves.size());
// Storing scores found for purpose of move ordering
final FVector moveScores = new FVector(numRootMoves);
final int searchDepthIncrement = allowedSearchDepths == AllowedSearchDepths.Any ? 1 : 2;
int searchDepth = startDepth - searchDepthIncrement;
final int maximisingPlayer = context.state().mover();
final int numPlayers = game.players().count();
// best move found so far during a fully-completed search
// (ignoring incomplete early-terminated search)
Move bestMoveCompleteSearch = sortedRootMoves.get(0);
// We can maybe narrow alpha-beta window if some players already won/lost
rootAlphaInit = ((float) RankUtils.rankToUtil(context.computeNextLossRank(), numPlayers)) * BETA_INIT;
rootBetaInit = ((float) RankUtils.rankToUtil(context.computeNextWinRank(), numPlayers)) * BETA_INIT;
while (searchDepth < maxDepth)
{
searchDepth += searchDepthIncrement;
searchedFullTree = true;
float score = ALPHA_INIT;
// best move during this particular search
Move bestMove = sortedRootMoves.get(0);
for (int i = 0; i < numRootMoves; ++i)
{
final Context copyContext = copyContext(context);
final Move m = sortedRootMoves.get(i);
game.apply(copyContext, m);
final float[] values = maxN(copyContext, searchDepth - 1, maximisingPlayer, rootAlphaInit, rootBetaInit, numPlayers, stopTime);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
bestMove = null;
break;
}
final int origMoveIdx = currentRootMoves.indexOf(m);
if (origMoveIdx >= 0)
{
rootValueEstimates.set(origMoveIdx, (float) scoreToValueEst(values[maximisingPlayer], rootAlphaInit, rootBetaInit));
}
moveScores.set(i, values[maximisingPlayer]);
if (values[maximisingPlayer] > score) // new best move found
{
//System.out.println("New best move: " + m + " with eval = " + value);
score = values[maximisingPlayer];
bestMove = m;
}
if (score >= rootBetaInit) // a winning move, only type of pruning we can do in Max^n
break;
}
// this is iterative deepening stuff again
if (bestMove != null) // search was not interrupted
{
estimatedRootScore = score;
if (score == rootBetaInit)
{
// we've just proven a win, so we can return best move
// found during this search
analysisReport += " (subsequent Max^n found proven win at depth " + searchDepth + ")";
provedWin = true;
return bestMove;
}
else if (score == rootAlphaInit)
{
// we've just proven a loss, so we return the best move
// of the PREVIOUS search (delays loss for the longest
// amount of time)
analysisReport += " (subsequent Max^n found proven loss at depth " + searchDepth + ")";
return bestMoveCompleteSearch;
}
else if (searchedFullTree)
{
// We've searched full tree but did not prove a win or loss
// probably means a draw, play best line we have
analysisReport += " (subsequent Max^n completed search of depth " + searchDepth + " (no proven win or loss))";
return bestMove;
}
bestMoveCompleteSearch = bestMove;
}
else
{
// Decrement because we didn't manage to complete this search
searchDepth -= searchDepthIncrement;
}
if (System.currentTimeMillis() >= stopTime || wantsInterrupt)
{
// we need to return
analysisReport += " (subsequent Max^n completed search of depth " + searchDepth + ")";
return bestMoveCompleteSearch;
}
// order moves based on scores found, for next search
scoredMoves.clear();
for (int i = 0; i < numRootMoves; ++i)
{
scoredMoves.add(new ScoredMove(sortedRootMoves.get(i), moveScores.get(i)));
}
Collections.sort(scoredMoves);
sortedRootMoves.clear();
for (int i = 0; i < numRootMoves; ++i)
{
sortedRootMoves.add(scoredMoves.get(i).move);
}
// clear the vector of scores
moveScores.fill(0, numRootMoves, 0.f);
}
analysisReport += " (subsequent Max^n completed search of depth " + searchDepth + ")";
return bestMoveCompleteSearch;
}
/**
* Recursive Max^n search function.
*
* @param context
* @param depth
* @param maximisingPlayer
* @param inAlpha
* @param inBeta
* @param numPlayers How many players in this game?
* @param stopTime
* @return (heuristic) evaluations of the reached state, from perspectives of all players.
*/
public float[] maxN
(
final Context context,
final int depth,
final int maximisingPlayer,
final float inAlpha,
final float inBeta,
final int numPlayers,
final long stopTime
)
{
final Trial trial = context.trial();
final State state = context.state();
if (trial.over())
{
// terminal node
final double[] utils = RankUtils.utilities(context);
final float[] toReturn = new float[utils.length];
for (int p = 1; p < utils.length; ++p)
{
toReturn[p] = (float) utils[p] * BETA_INIT;
if (toReturn[p] != inAlpha && toReturn[p] != inBeta)
{
minHeuristicEval = Math.min(minHeuristicEval, toReturn[p]);
maxHeuristicEval = Math.max(maxHeuristicEval, toReturn[p]);
}
}
return toReturn;
}
else if (depth == 0)
{
searchedFullTree = false;
// heuristic evaluations
final float[] playerScores = new float[numPlayers + 1];
final double[] utils = (context.numActive() == numPlayers) ? null : RankUtils.utilities(context);
for (int p = 1; p <= numPlayers; ++p)
{
if (context.active(p))
{
playerScores[p] = heuristicValueFunction().computeValue(context, p, ABS_HEURISTIC_WEIGHT_THRESHOLD);
}
else
{
playerScores[p] = (float) utils[p] * BETA_INIT;
}
}
final float oppScoreMultiplier = 1.f / numPlayers; // this gives us nicer heuristics around 0
final float[] toReturn = new float[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
for (int other = 1; other <= numPlayers; ++other)
{
if (other == p)
toReturn[p] += playerScores[other];
else
toReturn[p] -= oppScoreMultiplier * playerScores[other];
}
minHeuristicEval = Math.min(minHeuristicEval, toReturn[p]);
maxHeuristicEval = Math.max(maxHeuristicEval, toReturn[p]);
}
return toReturn;
}
final Game game = context.game();
final int mover = state.mover();
final FastArrayList<Move> legalMoves = game.moves(context).moves();
// We can maybe narrow alpha and beta if some players already won/lost
final float alpha = Math.max(inAlpha, ((float) RankUtils.rankToUtil(context.computeNextLossRank(), numPlayers)) * BETA_INIT);
final float beta = Math.min(inBeta, ((float) RankUtils.rankToUtil(context.computeNextWinRank(), numPlayers)) * BETA_INIT);
final int numLegalMoves = legalMoves.size();
float[] returnScores = new float[numPlayers + 1];
Arrays.fill(returnScores, ALPHA_INIT);
float score = ALPHA_INIT;
float maximisingPlayerTieBreaker = BETA_INIT;
for (int i = 0; i < numLegalMoves; ++i)
{
final Context copyContext = copyContext(context);
final Move m = legalMoves.get(i);
game.apply(copyContext, m);
final float[] values = maxN(copyContext, depth - 1, maximisingPlayer, alpha, beta, numPlayers, stopTime);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
return null;
}
if (values[mover] > score)
{
score = values[mover];
returnScores = values;
maximisingPlayerTieBreaker = values[maximisingPlayer];
}
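// Tie-breaking: when an opponent (the mover) is indifferent between moves,
// assume the paranoid case and keep the line that is worst for the maximising player.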
else if (values[mover] == score && mover != maximisingPlayer)
{
if (values[maximisingPlayer] < maximisingPlayerTieBreaker)
{
returnScores = values;
maximisingPlayerTieBreaker = values[maximisingPlayer];
}
}
if (score >= beta) // a winning move, only type of pruning we can do in Max^n
break;
}
return returnScores;
}
//-------------------------------------------------------------------------
/**
* @param player
* @return Opponents of given player
*/
public int[] opponents(final int player)
{
final int[] opponents = new int[numPlayersInGame - 1];
int idx = 0;
for (int p = 1; p <= numPlayersInGame; ++p)
{
if (p != player)
opponents[idx++] = p;
}
return opponents;
}
/**
* Converts a score into a value estimate in [-1, 1]. Useful for visualisations.
*
* @param score
* @param alpha
* @param beta
* @return Value estimate in [-1, 1] from unbounded (heuristic) score.
*/
public double scoreToValueEst(final float score, final float alpha, final float beta)
{
if (score == alpha)
return -1.0;
if (score == beta)
return 1.0;
// Map to range [-0.8, 0.8] based on most extreme heuristic evaluations
// observed so far.
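// E.g. (hypothetical) minHeuristicEval = -50, maxHeuristicEval = 150 and
// score = 50 map to -0.8 + 1.6 * (100 / 200) = 0.0.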
return -0.8 + (0.8 - -0.8) * ((score - minHeuristicEval) / (maxHeuristicEval - minHeuristicEval));
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
//System.out.println("initAI of Alpha-Beta called");
if (heuristicsFromMetadata)
{
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicValueFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicValueFunction = new Heuristics(new HeuristicTerm[]{
new Material(null, Float.valueOf(1.f), null, null),
new MobilitySimple(null, Float.valueOf(0.001f))
});
}
}
if (heuristicValueFunction() != null)
heuristicValueFunction().init(game);
// reset these things used for visualisation purposes
estimatedRootScore = 0.f;
maxHeuristicEval = 0.f;
minHeuristicEval = 0.f;
analysisReport = null;
currentRootMoves = null;
rootValueEstimates = null;
// and these things for ExIt
lastSearchedRootContext = null;
lastReturnedMove = null;
numPlayersInGame = game.players().count();
if (game.usesNoRepeatPositionalInGame() || game.usesNoRepeatPositionalInTurn())
transpositionTable = null;
else if (!allowTranspositionTable)
transpositionTable = null;
else
transpositionTable = new TranspositionTable(12);
}
@Override
public boolean supportsGame(final Game game)
{
if (game.players().count() <= 1)
return false;
// if (game.isStochasticGame())
// return false;
if (game.hiddenInformation())
return false;
if (game.hasSubgames()) // Can't properly init most heuristics
return false;
return game.isAlternatingMoveGame();
}
@Override
public double estimateValue()
{
return scoreToValueEst(estimatedRootScore, rootAlphaInit, rootBetaInit);
}
@Override
public String generateAnalysisReport()
{
return analysisReport;
}
@Override
public AIVisualisationData aiVisualisationData()
{
if (currentRootMoves == null || rootValueEstimates == null)
return null;
final FVector aiDistribution = rootValueEstimates.copy();
aiDistribution.subtract(aiDistribution.min());
return new AIVisualisationData(aiDistribution, rootValueEstimates, currentRootMoves);
}
//-------------------------------------------------------------------------
@Override
public FastArrayList<Move> lastSearchRootMoves()
{
final FastArrayList<Move> moves = new FastArrayList<Move>(currentRootMoves.size());
for (final Move move : currentRootMoves)
{
moves.add(move);
}
return moves;
}
@Override
public FVector computeExpertPolicy(final double tau)
{
final FVector distribution = FVector.zeros(currentRootMoves.size());
distribution.set(currentRootMoves.indexOf(lastReturnedMove), 1.f);
distribution.softmax();
return distribution;
}
@Override
public List<ExItExperience> generateExItExperiences()
{
final FastArrayList<Move> actions = new FastArrayList<Move>(currentRootMoves.size());
for (int i = 0; i < currentRootMoves.size(); ++i)
{
final Move m = new Move(currentRootMoves.get(i));
m.setMover(currentRootMoves.get(i).mover());
m.then().clear(); // Can't serialise these, and won't need them
actions.add(m);
}
final ExItExperience experience =
new ExItExperience
(
new Context(lastSearchedRootContext),
new ExItExperienceState(lastSearchedRootContext),
actions,
computeExpertPolicy(1.0),
FVector.zeros(actions.size()),
1.f
);
return Arrays.asList(experience);
}
// protected float getContextValue(int maximisingPlayer, Context context) // just for displaying the search tree
// {
// float heuristicScore = heuristicValueFunction().computeValue(
// context, maximisingPlayer, ABS_HEURISTIC_WEIGHT_THRESHOLD);
//
// for (final int opp : opponents(maximisingPlayer))
// {
// if (context.active(opp))
// heuristicScore -= heuristicValueFunction().computeValue(context, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
// else if (context.winners().contains(opp))
// heuristicScore -= PARANOID_OPP_WIN_SCORE;
// }
//
// // Invert scores if players swapped
// if (context.state().playerToAgent(maximisingPlayer) != maximisingPlayer)
// heuristicScore = -heuristicScore;
//
// minHeuristicEval = Math.min(minHeuristicEval, heuristicScore);
// maxHeuristicEval = Math.max(maxHeuristicEval, heuristicScore);
//
// return heuristicScore;
// }
//-------------------------------------------------------------------------
/**
* Wrapper for score + move, used for sorting moves based on scores.
*
* @author Dennis Soemers
*/
protected class ScoredMove implements Comparable<ScoredMove>
{
/** The move */
public final Move move;
/** The move's score */
public final float score;
/**
* Constructor
* @param move
* @param score
*/
public ScoredMove(final Move move, final float score)
{
this.move = move;
this.score = score;
}
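// Note: compareTo uses (other.score - score), so sorting a list of ScoredMoves
// with Collections.sort() puts the highest-scoring moves first.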
@Override
public int compareTo(final ScoredMove other)
{
final float delta = other.score - score;
if (delta < 0.f)
return -1;
else if (delta > 0.f)
return 1;
else
return 0;
}
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return Constructs an Alpha-Beta Search object from instructions in the
* given array of lines
*/
public static AlphaBetaSearch fromLines(final String[] lines)
{
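// Each line is split on commas and dispatched on its first part; recognised
// prefixes are "heuristics=" and "friendly_name=", e.g. (hypothetical)
// "friendly_name=MyAlphaBeta".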
String friendlyName = "Alpha-Beta";
Heuristics heuristics = null;
for (final String line : lines)
{
final String[] lineParts = line.split(",");
if (lineParts[0].toLowerCase().startsWith("heuristics="))
{
heuristics = Heuristics.fromLines(lineParts);
}
else if (lineParts[0].toLowerCase().startsWith("friendly_name="))
{
friendlyName = lineParts[0].substring("friendly_name=".length());
}
}
AlphaBetaSearch alphaBeta = null;
if (heuristics != null)
alphaBeta = new AlphaBetaSearch(heuristics);
if (alphaBeta == null)
alphaBeta = new AlphaBetaSearch();
alphaBeta.friendlyName = friendlyName;
return alphaBeta;
}
//-------------------------------------------------------------------------
public Heuristics heuristicValueFunction()
{
return heuristicValueFunction;
}
//-------------------------------------------------------------------------
/**
* Sets which search depths are allowed
* @param allowed
*/
public void setAllowedSearchDepths(final AllowedSearchDepths allowed)
{
allowedSearchDepths = allowed;
}
//-------------------------------------------------------------------------
}
| 37,418 | 28.440598 | 145 | java |
Ludii | Ludii-master/AI/src/search/minimax/BRSPlus.java | package search.minimax;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.FileHandling;
import main.collections.FVector;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilitySimple;
import other.RankUtils;
import other.context.Context;
import other.move.Move;
import other.state.State;
import other.trial.Trial;
import training.expert_iteration.ExItExperience;
import training.expert_iteration.ExItExperience.ExItExperienceState;
import training.expert_iteration.ExpertPolicy;
import utils.data_structures.transposition_table.TranspositionTable;
import utils.data_structures.transposition_table.TranspositionTable.ABTTData;
/**
* Implementation of BRS+ (Esser et al., 2013). Assumes perfect-information games.
* Uses iterative deepening when time-restricted, goes straight for
* depth limit when only depth-limited. Extracts heuristics to use from game's metadata.
*
* Cannot play games with fewer than 3 players (since then it would just revert to the
* normal AlphaBetaSearch that we already have).
*
* @author Dennis Soemers
*/
public class BRSPlus extends ExpertPolicy
{
//-------------------------------------------------------------------------
/** Value we use to initialise alpha ("negative infinity", but not really) */
private static final float ALPHA_INIT = -1000000.f;
/** Value we use to initialise beta ("positive infinity", but not really) */
private static final float BETA_INIT = -ALPHA_INIT;
/** Score we give to winning opponents in paranoid searches in states where game is still going (> 2 players) */
private static final float PARANOID_OPP_WIN_SCORE = 10000.f;
/** We skip computing heuristics with absolute weight value lower than this */
public static final float ABS_HEURISTIC_WEIGHT_THRESHOLD = 0.01f;
/** Our heuristic value function estimator */
private Heuristics heuristicValueFunction = null;
/** If true, we read our heuristic function to use from game's metadata */
private final boolean heuristicsFromMetadata;
/** We'll automatically return our move after at most this number of seconds if we only have one move */
protected double autoPlaySeconds = 0.0;
/** Estimated score of the root node based on last-run search */
protected float estimatedRootScore = 0.f;
/** The maximum heuristic eval we have ever observed */
protected float maxHeuristicEval = 0.f;
/** The minimum heuristic eval we have ever observed */
protected float minHeuristicEval = 0.f;
/** String to print to Analysis tab of the Ludii app */
protected String analysisReport = null;
/** Current list of moves available in root */
protected FastArrayList<Move> currentRootMoves = null;
/** The last move we returned. Need to memorise this for Expert Iteration with AlphaBeta */
protected Move lastReturnedMove = null;
/** Root context for which we've last performed a search */
protected Context lastSearchedRootContext = null;
/** Value estimates of moves available in root */
protected FVector rootValueEstimates = null;
/** The number of players in the game we're currently playing */
protected int numPlayersInGame = 0;
/** Needed for visualisations */
protected float rootAlphaInit = ALPHA_INIT;
/** Needed for visualisations */
protected float rootBetaInit = BETA_INIT;
/** Sorted (hopefully cleverly) list of moves available in root node */
protected FastArrayList<Move> sortedRootMoves = null;
/** If true at end of a search, it means we searched full tree (probably proved a draw) */
protected boolean searchedFullTree = false;
/** Transposition Table */
protected TranspositionTable transpositionTable = null;
//-------------------------------------------------------------------------
/**
* Constructor
*/
public BRSPlus()
{
friendlyName = "BRS+";
heuristicsFromMetadata = true;
transpositionTable = new TranspositionTable(12);
}
/**
* Constructor
* @param heuristicsFilepath
* @throws IOException
* @throws FileNotFoundException
*/
public BRSPlus(final String heuristicsFilepath) throws FileNotFoundException, IOException
{
friendlyName = "BRS+";
final String heuristicsStr = FileHandling.loadTextContentsFromFile(heuristicsFilepath);
this.heuristicValueFunction = (Heuristics)compiler.Compiler.compileObject
(
heuristicsStr,
"metadata.ai.heuristics.Heuristics",
new Report()
);
heuristicsFromMetadata = false;
transpositionTable = new TranspositionTable(12);
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final int depthLimit = maxDepth > 0 ? maxDepth : Integer.MAX_VALUE;
lastSearchedRootContext = context;
if (transpositionTable != null)
transpositionTable.allocate();
if (maxSeconds > 0)
{
lastReturnedMove = iterativeDeepening(game, context, maxSeconds, depthLimit, 1);
if (transpositionTable != null)
transpositionTable.deallocate();
return lastReturnedMove;
}
else
{
// we'll just do iterative deepening with the depth limit as starting depth
lastReturnedMove = iterativeDeepening(game, context, maxSeconds, depthLimit, depthLimit);
if (transpositionTable != null)
transpositionTable.deallocate();
return lastReturnedMove;
}
}
//-------------------------------------------------------------------------
/**
* Runs iterative deepening alpha-beta
* @param game
* @param context
* @param maxSeconds
* @param maxDepth
* @param startDepth
* @return Move to play
*/
public Move iterativeDeepening
(
final Game game,
final Context context,
final double maxSeconds,
final int maxDepth,
final int startDepth
)
{
final long startTime = System.currentTimeMillis();
long stopTime = (maxSeconds > 0.0) ? startTime + (long) (maxSeconds * 1000) : Long.MAX_VALUE;
final int numPlayers = game.players().count();
currentRootMoves = new FastArrayList<Move>(game.moves(context).moves());
// Create a shuffled version of list of moves (random tie-breaking)
final FastArrayList<Move> tempMovesList = new FastArrayList<Move>(currentRootMoves);
sortedRootMoves = new FastArrayList<Move>(currentRootMoves.size());
while (!tempMovesList.isEmpty())
{
sortedRootMoves.add(tempMovesList.removeSwap(ThreadLocalRandom.current().nextInt(tempMovesList.size())));
}
final int numRootMoves = sortedRootMoves.size();
final List<ScoredMove> scoredMoves = new ArrayList<ScoredMove>(sortedRootMoves.size());
if (numRootMoves == 1)
{
// play faster if we only have one move available anyway
if (autoPlaySeconds >= 0.0 && autoPlaySeconds < maxSeconds)
stopTime = startTime + (long) (autoPlaySeconds * 1000);
}
// Vector for visualisation purposes
rootValueEstimates = new FVector(currentRootMoves.size());
// storing scores found for purpose of move ordering
final FVector moveScores = new FVector(numRootMoves);
int searchDepth = startDepth - 1;
final int maximisingPlayer = context.state().playerToAgent(context.state().mover());
// best move found so far during a fully-completed search
// (ignoring incomplete early-terminated search)
Move bestMoveCompleteSearch = sortedRootMoves.get(0);
// For paranoid search, we can narrow alpha-beta window if some players already won/lost
rootAlphaInit = ((float) RankUtils.rankToUtil(context.computeNextLossRank(), numPlayers)) * BETA_INIT;
rootBetaInit = ((float) RankUtils.rankToUtil(context.computeNextWinRank(), numPlayers)) * BETA_INIT;
while (searchDepth < maxDepth)
{
++searchDepth;
searchedFullTree = true;
//System.out.println("SEARCHING TO DEPTH: " + searchDepth);
// the real alpha-beta stuff starts here
float score = rootAlphaInit;
float alpha = rootAlphaInit;
final float beta = rootBetaInit;
// best move during this particular search
Move bestMove = sortedRootMoves.get(0);
for (int i = 0; i < numRootMoves; ++i)
{
final Context copyContext = copyContext(context);
final Move m = sortedRootMoves.get(i);
game.apply(copyContext, m);
final float value = alphaBeta(copyContext, searchDepth - 1, alpha, beta, maximisingPlayer, stopTime, 1);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
bestMove = null;
break;
}
final int origMoveIdx = currentRootMoves.indexOf(m);
if (origMoveIdx >= 0)
{
rootValueEstimates.set(origMoveIdx, (float) scoreToValueEst(value, rootAlphaInit, rootBetaInit));
}
moveScores.set(i, value);
if (value > score) // new best move found
{
//System.out.println("New best move: " + m + " with eval = " + value);
score = value;
bestMove = m;
}
if (score > alpha) // new lower bound
alpha = score;
if (alpha >= beta) // beta cut-off
break;
}
// alpha-beta is over, this is iterative deepening stuff again
if (bestMove != null) // search was not interrupted
{
estimatedRootScore = score;
if (score == rootBetaInit)
{
// we've just proven a win, so we can return best move
// found during this search
analysisReport = friendlyName + " found a proven win at depth " + searchDepth + ".";
return bestMove;
}
else if (score == rootAlphaInit)
{
// we've just proven a loss, so we return the best move
// of the PREVIOUS search (delays loss for the longest
// amount of time)
analysisReport = friendlyName + " found a proven loss at depth " + searchDepth + ".";
return bestMoveCompleteSearch;
}
else if (searchedFullTree)
{
// We've searched full tree but did not prove a win or loss
// probably means a draw, play best line we have
analysisReport = friendlyName + " completed search of depth " + searchDepth + " (no proven win or loss).";
return bestMove;
}
bestMoveCompleteSearch = bestMove;
}
else
{
// decrement because we didn't manage to complete this search
--searchDepth;
}
if (System.currentTimeMillis() >= stopTime || wantsInterrupt)
{
// we need to return
analysisReport = friendlyName + " completed search of depth " + searchDepth + ".";
return bestMoveCompleteSearch;
}
// order moves based on scores found, for next search
scoredMoves.clear();
for (int i = 0; i < numRootMoves; ++i)
{
scoredMoves.add(new ScoredMove(sortedRootMoves.get(i), moveScores.get(i)));
}
Collections.sort(scoredMoves);
sortedRootMoves.clear();
for (int i = 0; i < numRootMoves; ++i)
{
sortedRootMoves.add(scoredMoves.get(i).move);
}
// clear the vector of scores
moveScores.fill(0, numRootMoves, 0.f);
}
analysisReport = friendlyName + " completed search of depth " + searchDepth + ".";
return bestMoveCompleteSearch;
}
/**
* Recursive alpha-beta search function.
*
* @param context Current state (context) to search from
* @param depth Remaining search depth
* @param inAlpha Alpha bound passed in by the caller
* @param inBeta Beta bound passed in by the caller
* @param maximisingPlayer Who is the maximising player?
* @param stopTime Time (in milliseconds, as given by System.currentTimeMillis()) at which the search must be aborted
* @param regMoveCounter Tracks the number of regular moves between successive turns of root player (for BRS+)
* @return (heuristic) evaluation of the reached state, from perspective of maximising player.
*/
public float alphaBeta
(
final Context context,
final int depth,
final float inAlpha,
final float inBeta,
final int maximisingPlayer,
final long stopTime,
final int regMoveCounter
)
{
final Trial trial = context.trial();
final State state = context.state();
final float originalAlpha = inAlpha;
float alpha = inAlpha;
float beta = inBeta;
final long zobrist = state.fullHash();
final ABTTData tableData;
if (transpositionTable != null)
{
tableData = transpositionTable.retrieve(zobrist);
if (tableData != null)
{
if (tableData.depth >= depth)
{
// Already searched deep enough for data in TT, use results
switch(tableData.valueType)
{
case TranspositionTable.EXACT_VALUE:
return tableData.value;
case TranspositionTable.LOWER_BOUND:
alpha = Math.max(alpha, tableData.value);
break;
case TranspositionTable.UPPER_BOUND:
beta = Math.min(beta, tableData.value);
break;
default:
System.err.println("INVALID TRANSPOSITION TABLE DATA!");
break;
}
if (alpha >= beta)
return tableData.value;
}
}
}
else
{
tableData = null;
}
if (trial.over() || !context.active(maximisingPlayer))
{
// terminal node (at least for maximising player)
return (float) RankUtils.agentUtilities(context)[maximisingPlayer] * BETA_INIT;
}
else if (depth == 0)
{
searchedFullTree = false;
// heuristic evaluation
float heuristicScore = heuristicValueFunction.computeValue(
context, maximisingPlayer, ABS_HEURISTIC_WEIGHT_THRESHOLD);
for (final int opp : opponents(maximisingPlayer))
{
if (context.active(opp))
heuristicScore -= heuristicValueFunction.computeValue(context, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
else if (context.winners().contains(opp))
heuristicScore -= PARANOID_OPP_WIN_SCORE;
}
// Invert scores if players swapped
if (state.playerToAgent(maximisingPlayer) != maximisingPlayer)
heuristicScore = -heuristicScore;
minHeuristicEval = Math.min(minHeuristicEval, heuristicScore);
maxHeuristicEval = Math.max(maxHeuristicEval, heuristicScore);
return heuristicScore;
}
final Game game = context.game();
final int mover = state.playerToAgent(state.mover());
FastArrayList<Move> legalMoves = game.moves(context).moves();
final int numLegalMoves = legalMoves.size();
if (tableData != null)
{
// Put best move according to Transposition Table first
final Move transpositionBestMove = tableData.bestMove;
legalMoves = new FastArrayList<Move>(legalMoves); // Copy to avoid modifying original
for (int i = 0; i < legalMoves.size(); ++i)
{
if (transpositionBestMove.equals(legalMoves.get(i)))
{
final Move temp = legalMoves.get(0);
legalMoves.set(0, legalMoves.get(i));
legalMoves.set(i, temp);
break;
}
}
}
final int numPlayers = game.players().count();
// For paranoid search, we may be able to narrow the alpha-beta window if some players have already won/lost
alpha = Math.max(alpha, ((float) RankUtils.rankToUtil(context.computeNextLossRank(), numPlayers)) * BETA_INIT);
beta = Math.min(beta, ((float) RankUtils.rankToUtil(context.computeNextWinRank(), numPlayers)) * BETA_INIT);
Move bestMove = legalMoves.get(0);
if (mover == maximisingPlayer)
{
float score = ALPHA_INIT;
for (int i = 0; i < numLegalMoves; ++i)
{
final Context copyContext = copyContext(context);
final Move m = legalMoves.get(i);
game.apply(copyContext, m);
final float value = alphaBeta(copyContext, depth - 1, alpha, beta, maximisingPlayer, stopTime, 1);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
return 0;
}
if (value > score)
{
bestMove = m;
score = value;
}
if (score > alpha)
alpha = score;
if (alpha >= beta) // beta cut-off
break;
}
if (transpositionTable != null)
{
// Store data in transposition table
if (score <= originalAlpha) // Found upper bound
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.UPPER_BOUND);
else if (score >= beta) // Found lower bound
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.LOWER_BOUND);
else // Found exact value
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.EXACT_VALUE);
}
return score;
}
else
{
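// Minimising (opponent) node for BRS+: regular moves are searched unless regMoveCounter == 2,
// in which case only a single "special" move is tried (the TT best move if available, otherwise
// a random legal move); a special move is also tried, in addition to the regular ones, whenever
// the next mover is again an opponent.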
float score = BETA_INIT;
boolean allowRegularMoves = true;
boolean allowSpecialMove = false;
if (regMoveCounter == 2)
{
allowRegularMoves = false;
allowSpecialMove = true;
}
else if (state.playerToAgent(state.next()) != maximisingPlayer)
{
allowSpecialMove = true;
}
boolean cutOff = false;
if (allowRegularMoves)
{
for (int i = 0; i < numLegalMoves; ++i)
{
final Context copyContext = copyContext(context);
final Move m = legalMoves.get(i);
game.apply(copyContext, m);
final float value = alphaBeta(copyContext, depth - 1, alpha, beta, maximisingPlayer, stopTime, regMoveCounter + 1);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
return 0;
}
if (value < score)
{
bestMove = m;
score = value;
}
if (score < beta)
beta = score;
if (alpha >= beta) // alpha cut-off
{
cutOff = true;
break;
}
}
}
if (allowSpecialMove && !cutOff)
{
final Context copyContext = copyContext(context);
final Move m;
if (tableData != null) // We have move ordering from TT
m = legalMoves.get(0);
else // No move ordering, just randomly pick a move
m = legalMoves.get(ThreadLocalRandom.current().nextInt(legalMoves.size()));
game.apply(copyContext, m);
final float value = alphaBeta(copyContext, depth - 1, alpha, beta, maximisingPlayer, stopTime, regMoveCounter + 1);
if (System.currentTimeMillis() >= stopTime || wantsInterrupt) // time to abort search
{
return 0;
}
if (value < score)
{
bestMove = m;
score = value;
}
}
if (transpositionTable != null)
{
// Store data in transposition table
if (score <= originalAlpha) // Found upper bound
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.UPPER_BOUND);
else if (score >= beta) // Found lower bound
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.LOWER_BOUND);
else // Found exact value
transpositionTable.store(bestMove, zobrist, score, depth, TranspositionTable.EXACT_VALUE);
}
return score;
}
}
//-------------------------------------------------------------------------
/**
* @param player
* @return Opponents of given player
*/
public int[] opponents(final int player)
{
final int[] opponents = new int[numPlayersInGame - 1];
int idx = 0;
for (int p = 1; p <= numPlayersInGame; ++p)
{
if (p != player)
opponents[idx++] = p;
}
return opponents;
}
/**
* Converts a score into a value estimate in [-1, 1]. Useful for visualisations.
*
* @param score
* @param alpha
* @param beta
* @return Value estimate in [-1, 1] from unbounded (heuristic) score.
*/
public double scoreToValueEst(final float score, final float alpha, final float beta)
{
if (score == alpha)
return -1.0;
if (score == beta)
return 1.0;
// Map to range [-0.8, 0.8] based on most extreme heuristic evaluations
// observed so far.
return -0.8 + (0.8 - -0.8) * ((score - minHeuristicEval) / (maxHeuristicEval - minHeuristicEval));
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
if (heuristicsFromMetadata)
{
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicValueFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicValueFunction = new Heuristics(new HeuristicTerm[]{
new Material(null, Float.valueOf(1.f), null, null),
new MobilitySimple(null, Float.valueOf(0.001f))
});
}
}
if (heuristicValueFunction != null)
heuristicValueFunction.init(game);
// reset these things used for visualisation purposes
estimatedRootScore = 0.f;
maxHeuristicEval = 0.f;
minHeuristicEval = 0.f;
analysisReport = null;
currentRootMoves = null;
rootValueEstimates = null;
// and these things for ExIt
lastSearchedRootContext = null;
lastReturnedMove = null;
numPlayersInGame = game.players().count();
}
@Override
public boolean supportsGame(final Game game)
{
if (game.players().count() <= 2)
return false;
if (game.isStochasticGame())
return false;
if (game.hiddenInformation())
return false;
return game.isAlternatingMoveGame();
}
@Override
public double estimateValue()
{
return scoreToValueEst(estimatedRootScore, rootAlphaInit, rootBetaInit);
}
@Override
public String generateAnalysisReport()
{
return analysisReport;
}
@Override
public AIVisualisationData aiVisualisationData()
{
if (currentRootMoves == null || rootValueEstimates == null)
return null;
final FVector aiDistribution = rootValueEstimates.copy();
aiDistribution.subtract(aiDistribution.min());
return new AIVisualisationData(aiDistribution, rootValueEstimates, currentRootMoves);
}
//-------------------------------------------------------------------------
@Override
public FastArrayList<Move> lastSearchRootMoves()
{
final FastArrayList<Move> moves = new FastArrayList<Move>(currentRootMoves.size());
for (final Move move : currentRootMoves)
{
moves.add(move);
}
return moves;
}
@Override
public FVector computeExpertPolicy(final double tau)
{
final FVector distribution = FVector.zeros(currentRootMoves.size());
distribution.set(currentRootMoves.indexOf(lastReturnedMove), 1.f);
distribution.softmax();
return distribution;
}
@Override
public List<ExItExperience> generateExItExperiences()
{
final FastArrayList<Move> actions = new FastArrayList<Move>(currentRootMoves.size());
for (int i = 0; i < currentRootMoves.size(); ++i)
{
final Move m = new Move(currentRootMoves.get(i));
m.setMover(currentRootMoves.get(i).mover());
m.then().clear(); // Can't serialise these, and won't need them
actions.add(m);
}
final ExItExperience experience =
new ExItExperience
(
new Context(lastSearchedRootContext),
new ExItExperienceState(lastSearchedRootContext),
actions,
computeExpertPolicy(1.0),
FVector.zeros(actions.size()),
1.f
);
return Arrays.asList(experience);
}
//-------------------------------------------------------------------------
/**
* Wrapper for score + move, used for sorting moves based on scores.
*
* @author Dennis Soemers
*/
private class ScoredMove implements Comparable<ScoredMove>
{
/** The move */
public final Move move;
/** The move's score */
public final float score;
/**
* Constructor
* @param move
* @param score
*/
public ScoredMove(final Move move, final float score)
{
this.move = move;
this.score = score;
}
@Override
public int compareTo(final ScoredMove other)
{
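// Natural ordering is by decreasing score: higher-scored moves sort first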
final float delta = other.score - this.score;
if (delta < 0.f)
return -1;
else if (delta > 0.f)
return 1;
else
return 0;
}
}
//-------------------------------------------------------------------------
/**
* @param lines
* @return Constructs a BRS+ object from instructions in the
* given array of lines
*/
public static BRSPlus fromLines(final String[] lines)
{
String friendlyName = "BRS+";
String heuristicsFilepath = null;
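// Each line is split on ',' and only its first token is inspected. Example (hypothetical) lines:
//   friendly_name=BRS+ (tuned)
//   heuristics=/path/to/heuristics.txt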
for (final String line : lines)
{
final String[] lineParts = line.split(",");
if (lineParts[0].toLowerCase().startsWith("heuristics="))
{
heuristicsFilepath = lineParts[0].substring("heuristics=".length());
}
else if (lineParts[0].toLowerCase().startsWith("friendly_name="))
{
friendlyName = lineParts[0].substring("friendly_name=".length());
}
}
BRSPlus brsPlus = null;
if (heuristicsFilepath != null)
{
try
{
brsPlus = new BRSPlus(heuristicsFilepath);
}
catch (final IOException e)
{
e.printStackTrace();
}
}
if (brsPlus == null)
brsPlus = new BRSPlus();
brsPlus.friendlyName = friendlyName;
return brsPlus;
}
//-------------------------------------------------------------------------
}
| 24,838 | 27.485092 | 120 | java |
Ludii | Ludii-master/AI/src/search/minimax/BiasedUBFM.java | package search.minimax;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import game.Game;
import gnu.trove.list.array.TLongArrayList;
import main.collections.FVector;
import main.collections.FastArrayList;
import metadata.ai.heuristics.Heuristics;
import other.context.Context;
import other.context.TempContext;
import other.move.Move;
import other.state.State;
import policies.softmax.SoftmaxFromMetadataSelection;
import policies.softmax.SoftmaxPolicy;
import utils.data_structures.ScoredIndex;
/**
* AI based on Unbounded Best-First Minimax, which uses the action evaluation to select a small number
* of actions that will really be simulated (the most promising ones). If selectionEpsilon != 0, then any other move can still be randomly
* picked for exploration with a probability of selectionEpsilon.
*
* @author cyprien
*/
public class BiasedUBFM extends UBFM
{
/** Number of moves that are really evaluated with the heuristics at each step of the exploration. */
private int nbStateEvaluationsPerNode = 6;
//-------------------------------------------------------------------------
/** A learned policy to use in Selection phase */
protected SoftmaxPolicy learnedSelectionPolicy = null;
//-------------------------------------------------------------------------
public static BiasedUBFM createBiasedUBFM ()
{
return new BiasedUBFM();
}
/**
* Constructor:
*/
public BiasedUBFM ()
{
super();
setLearnedSelectionPolicy(new SoftmaxFromMetadataSelection(0f));
friendlyName = "Biased UBFM";
}
/**
* Constructor
* @param heuristics
*/
public BiasedUBFM(final Heuristics heuristics)
{
super(heuristics);
setLearnedSelectionPolicy(new SoftmaxFromMetadataSelection(0f));
friendlyName = "Biased UBFM";
}
//-------------------------------------------------------------------------
@Override
protected FVector estimateMovesValues
(
final FastArrayList<Move> legalMoves,
final Context context,
final int maximisingPlayer,
final TLongArrayList nodeHashes,
final int depth,
final long stopTime
)
{
final int numLegalMoves = legalMoves.size();
final Game game = context.game();
final State state = context.state();
final int mover = state.playerToAgent(state.mover());
final List<ScoredIndex> consideredMoveIndices = new ArrayList<ScoredIndex>(numLegalMoves);
for (int i = 0; i < numLegalMoves; ++i)
{
final Move m = legalMoves.get(i);
final float actionValue = learnedSelectionPolicy.computeLogit(context,m);
consideredMoveIndices.add(new ScoredIndex(i,actionValue));
}
Collections.sort(consideredMoveIndices);
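// Only the nbStateEvaluationsPerNode most promising moves according to the learned policy are
// actually simulated and evaluated with the heuristic below; all other moves keep a default
// worst-case score from the mover's perspective.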
final FVector moveScores = new FVector(numLegalMoves);
for (int i = 0; i < numLegalMoves; ++i)
{
// Filling a default score for each move:
moveScores.set(i, (mover==maximisingPlayer)? -BETA_INIT+1: BETA_INIT-1);
}
for (int k = 0; k < Math.min(nbStateEvaluationsPerNode, numLegalMoves); k++)
{
final int i = consideredMoveIndices.get(k).index;
final Move m = legalMoves.get(i);
final Context contextCopy = new TempContext(context);
game.apply(contextCopy, m);
nodeHashes.add(contextCopy.state().fullHash(contextCopy));
final float heuristicScore = getContextValue(contextCopy, maximisingPlayer, nodeHashes,depth);
nodeHashes.removeAt(nodeHashes.size()-1);
moveScores.set(i, heuristicScore);
if ((System.currentTimeMillis() >= stopTime) || wantsInterrupt)
{
break;
}
}
return moveScores;
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
super.initAI(game, playerID);
// Instantiate feature sets for selection policy
if (learnedSelectionPolicy != null)
learnedSelectionPolicy.initAI(game, playerID);
return;
}
@Override
public boolean supportsGame(final Game game)
{
if (game.isStochasticGame())
return false;
if (game.hiddenInformation())
return false;
if (game.hasSubgames()) // Can't properly init most heuristics
return false;
if (!(game.isAlternatingMoveGame()))
return false;
return ((game.metadata().ai().features() != null) || (game.metadata().ai().trainedFeatureTrees() != null));
}
//-------------------------------------------------------------------------
/**
* Sets the learned policy to use in Selection phase
* @param policy The policy.
*/
public void setLearnedSelectionPolicy(final SoftmaxPolicy policy)
{
learnedSelectionPolicy = policy;
}
/**
* Sets the number of moves that will be really evaluated with a simulation and a call to the heuristicValue function.
* @param value
*/
public void setNbStateEvaluationsPerNode(final int value)
{
nbStateEvaluationsPerNode = value;
}
}
| 4,832 | 26 | 139 | java |
Ludii | Ludii-master/AI/src/search/minimax/HybridUBFM.java | package search.minimax;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import gnu.trove.list.array.TLongArrayList;
import other.RankUtils;
import other.context.Context;
import other.context.TempContext;
import other.move.Move;
import other.state.State;
import policies.softmax.SoftmaxFromMetadataSelection;
import policies.softmax.SoftmaxPolicy;
import utils.data_structures.transposition_table.TranspositionTableUBFM;
import utils.data_structures.transposition_table.TranspositionTableUBFM.UBFMTTData;
/**
* AI based on Unbounded Best-First Search, using trained action evaluations to complement the heuristic scores with informed playouts.
* Can also work with no trained features, and will then execute random playouts.
*
* (the formula for the evaluation of a context is v(s) = h(s) * x + (1-x) * p(s) * max(abs(h))
* where h is the heuristic score, x is heuristicScoreWeight parameter, p(s) is the average
* ranking utility obtained in the playouts (between -1 and 1) and max(abs(h)) is the
* maximum absolute value of heuristics observed up to now. An effect of this choice is that
* the playouts will have less impact when the AI doesn't know much about the heuristics range.)
*
*
* @author cyprien
*/
public class HybridUBFM extends UBFM
{
/** An epsilon parameter to give to the selection policy */
private final float epsilon = 0.5f;
/** Number of playouts for each state's evaluation */
protected int nbPlayoutsPerEvaluation = 6;
/** Weight of heuristics score in state evaluation */
protected float heuristicScoreWeight = 0.5f;
//-------------------------------------------------------------------------
/** A learned policy to use in the selection phase */
protected SoftmaxPolicy learnedSelectionPolicy = null;
/** For analysis report: */
private int nbPlayoutsDone;
/** Maximum absolute value recorded for heuristic scores */
protected float maxAbsHeuristicScore = 0f;
/** For the AI visualisation data: */
float maxRegisteredValue;
float minRegisteredValue;
//-------------------------------------------------------------------------
public static HybridUBFM createHybridUBFM()
{
return new HybridUBFM();
}
/**
* Constructor:
*/
public HybridUBFM ()
{
super();
friendlyName = "Hybrid UBFM";
}
//-------------------------------------------------------------------------
@Override
protected Move BFSSelection
(
final Game game,
final Context context,
final double maxSeconds,
final int depthLimit
)
{
nbPlayoutsDone = 0;
final Move res = super.BFSSelection(game, context, maxSeconds,depthLimit);
analysisReport += "(" + Integer.toString(nbPlayoutsDone) + " playouts done)";
return res;
}
@Override
protected float getContextValue
(
final Context context,
final int maximisingPlayer,
final TLongArrayList nodeHashes,
final int depth
)
{
final State state = context.state();
final long zobrist = state.fullHash(context);
final int newMover = state.playerToAgent(state.mover());
boolean valueRetrievedFromMemory = false;
float contextScore = Float.NaN;
final UBFMTTData tableData;
if (transpositionTable != null)
{
tableData = transpositionTable.retrieve(zobrist);
if (tableData != null)
{
// Already searched for data in TT, use results
switch(tableData.valueType)
{
case TranspositionTableUBFM.EXACT_VALUE:
contextScore = tableData.value;
valueRetrievedFromMemory = true;
break;
case TranspositionTableUBFM.INVALID_VALUE:
System.err.println("INVALID TRANSPOSITION TABLE DATA: INVALID VALUE");
break;
default:
System.err.println("INVALID TRANSPOSITION TABLE DATA: INVALID VALUE");
break;
}
}
}
// Only compute heuristicScore if we didn't have a score registered in the TT
if (!valueRetrievedFromMemory)
{
if (context.trial().over() || !context.active(maximisingPlayer))
{
// terminal node (at least for maximising player)
contextScore = (float) RankUtils.agentUtilities(context)[maximisingPlayer] * BETA_INIT;
}
else
{
float scoreMean = 0f;
float heuristicScore = heuristicValueFunction().computeValue(
context, maximisingPlayer, ABS_HEURISTIC_WEIGHT_THRESHOLD);
for (final int opp : opponents(maximisingPlayer))
{
if (context.active(opp))
heuristicScore -= heuristicValueFunction().computeValue(context, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
else if (context.winners().contains(opp))
heuristicScore -= PARANOID_OPP_WIN_SCORE;
}
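// Run nbPlayoutsPerEvaluation playouts from this state (guided by the learned policy if one is
// available, otherwise uniformly random with an arbitrary cap of 200 moves) and average the
// resulting utilities for the maximising player, scaled by the largest absolute heuristic value
// observed so far.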
for (int i = 0; i<nbPlayoutsPerEvaluation; i++)
{
final Context contextCopy = new TempContext(context);
nbPlayoutsDone += 1;
if (learnedSelectionPolicy != null)
learnedSelectionPolicy.runPlayout(null, contextCopy);
else
context.game().playout(contextCopy, null, 1.0, null, 0, 200, ThreadLocalRandom.current()); // arbitrary max 200 moves
scoreMean += RankUtils.agentUtilities(contextCopy)[maximisingPlayer] * maxAbsHeuristicScore / nbPlayoutsPerEvaluation;
}
contextScore = heuristicScore * heuristicScoreWeight + scoreMean * (1f - heuristicScoreWeight);
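// Illustrative example with hypothetical numbers: heuristicScoreWeight = 0.5, heuristicScore = 2.0,
// an average playout utility of +0.5 and maxAbsHeuristicScore = 4.0 give scoreMean = 0.5 * 4.0 = 2.0,
// so contextScore = 0.5 * 2.0 + 0.5 * 2.0 = 2.0.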
if (debugDisplay)
{
if (ThreadLocalRandom.current().nextFloat() < 0.1)
{
System.out.printf
(
"heuristic score is %.5g while avg score is %.5g -> final value is %.5g\n",
Float.valueOf(heuristicScore),
Float.valueOf(scoreMean),
Float.valueOf(contextScore)
);
}
}
minHeuristicEval = Math.min(minHeuristicEval, heuristicScore);
maxHeuristicEval = Math.max(maxHeuristicEval, heuristicScore);
maxRegisteredValue = Math.max(contextScore, maxRegisteredValue);
minRegisteredValue = Math.min(contextScore, minRegisteredValue);
maxAbsHeuristicScore = Math.max(maxAbsHeuristicScore, Math.abs(heuristicScore));
}
if (transpositionTable != null)
transpositionTable.store(zobrist, contextScore, depth, TranspositionTableUBFM.EXACT_VALUE, null);
nbStatesEvaluated += 1;
}
if (savingSearchTreeDescription)
searchTreeOutput.append("("+stringOfNodeHashes(nodeHashes)+","+Float.toString(contextScore)+","+((newMover==maximisingPlayer)? 1: 2)+"),\n");
return contextScore;
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
super.initAI(game, playerID);
if ((game.metadata().ai().features() != null) || (game.metadata().ai().trainedFeatureTrees() != null))
{
setLearnedSelectionPolicy(new SoftmaxFromMetadataSelection(epsilon));
learnedSelectionPolicy.initAI(game, playerID);
}
maxAbsHeuristicScore = 0;
return;
}
//--------------------------------------------------------------------------
/**
* Converts a score into a value estimate in [-1, 1]. Useful for visualisations.
*
* @param score
* @param alpha
* @param beta
* @return Value estimate in [-1, 1] from unbounded (heuristic) score.
*/
@Override
public double scoreToValueEst(final float score, final float alpha, final float beta)
{
if (score <= alpha+10)
return -1.0;
if (score >= beta-10)
return 1.0;
minRegisteredValue = Math.min(minRegisteredValue, minHeuristicEval);
maxRegisteredValue = Math.max(maxRegisteredValue, maxHeuristicEval);
// Map to range [-0.8, 0.8] based on most extreme heuristic evaluations
// observed so far.
return -0.8 + (0.8 - -0.8) * ((score - minRegisteredValue) / (maxRegisteredValue - minRegisteredValue));
}
//-------------------------------------------------------------------------
/**
* Sets the learned policy to use in Selection phase
* @param policy The policy.
*/
public void setLearnedSelectionPolicy(final SoftmaxPolicy policy)
{
learnedSelectionPolicy = policy;
}
/**
* Sets the number of playouts per context evaluations.
* @param n
*/
public void setPlayoutsPerEvaluation(final int n)
{
nbPlayoutsPerEvaluation = n;
}
/**
* Set the weight of the heuristic evaluation function in the evaluation of a move.
* @param value
*/
public void setHeuristicScoreWeight(final float value)
{
heuristicScoreWeight = value;
}
}
| 8,289 | 28.190141 | 144 | java |
Ludii | Ludii-master/AI/src/search/minimax/LazyUBFM.java | package search.minimax;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import gnu.trove.list.array.TLongArrayList;
import main.collections.FVector;
import main.collections.FastArrayList;
import metadata.ai.heuristics.Heuristics;
import other.context.Context;
import other.move.Move;
import other.state.State;
import policies.softmax.SoftmaxFromMetadataSelection;
import policies.softmax.SoftmaxPolicy;
/**
* AI based on Unbounded Best-First Search, using trained action evaluations to complete the heuristic scores.
* [...]
*
* @author cyprien
*/
public class LazyUBFM extends UBFM
{
/** Weight of the action evaluation when linearly combined with the heuristic score */
private static float actionEvaluationWeight = 0.5f;
//-------------------------------------------------------------------------
/** A learned policy to use in for the action evaluation */
protected SoftmaxPolicy learnedSelectionPolicy = null;
/** A boolean to know if it is the first turn the AI is playing on this game. If so, it will just use
* a basic UBFM approach to have an idea of the heuristics range. */
boolean firstTurn;
/** Different fields to have an idea of how to combine action evaluations and heuristic scores properly */
float estimatedHeuristicScoresRange;
float maxActionLogit = Float.NEGATIVE_INFINITY;
float minActionLogit = Float.POSITIVE_INFINITY;
float estimatedActionLogitRange;
float actionLogitSum;
float actionLogitComputations;
float estimatedActionLogitMean;
/** For the AI visualisation data: */
float maxRegisteredValue;
float minRegisteredValue;
//-------------------------------------------------------------------------
public static LazyUBFM createLazyUBFM()
{
return new LazyUBFM();
}
/**
* Constructor:
*/
public LazyUBFM()
{
super();
setLearnedSelectionPolicy(new SoftmaxFromMetadataSelection(0f));
friendlyName = "Lazy UBFM";
return;
}
/**
* Constructor
* @param heuristics
*/
public LazyUBFM(final Heuristics heuristics)
{
super(heuristics);
setLearnedSelectionPolicy(new SoftmaxFromMetadataSelection(0f));
friendlyName = "Lazy UBFM";
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
final Move bestMove = super.selectAction(game, context, maxSeconds, maxIterations, maxDepth);
// super.selectAction will call this class's own estimateMovesValues
firstTurn = false;
estimatedHeuristicScoresRange = maxHeuristicEval - minHeuristicEval;
estimatedActionLogitRange = maxActionLogit - minActionLogit;
estimatedActionLogitMean = actionLogitSum / actionLogitComputations;
return bestMove;
}
@Override
protected FVector estimateMovesValues
(
final FastArrayList<Move> legalMoves,
final Context context,
final int maximisingPlayer,
final TLongArrayList nodeHashes,
final int depth,
final long stopTime
)
{
final State state = context.state();
final int mover = state.playerToAgent(state.mover());
final float heuristicScore = getContextValue(context, maximisingPlayer, nodeHashes, depth - 1);
if (savingSearchTreeDescription)
searchTreeOutput.append("("+stringOfNodeHashes(nodeHashes)+","+Float.toString(heuristicScore)+","+((mover==maximisingPlayer)? 1:2)+"),\n");
final int numLegalMoves = legalMoves.size();
final FVector moveScores = new FVector(numLegalMoves);
// Computing action scores (stored in moveScores)
for (int i = 0; i < numLegalMoves; ++i)
{
final Move m = legalMoves.get(i);
final float actionValue = learnedSelectionPolicy.computeLogit(context, m);
actionLogitSum += actionValue;
actionLogitComputations += 1;
maxActionLogit = Math.max(actionValue, maxActionLogit);
minActionLogit = Math.min(actionValue, minActionLogit);
moveScores.set(i, actionValue);
}
if (firstTurn)
{
// Uses the classical UBFM approach on the first turn.
final FVector res = super.estimateMovesValues(legalMoves, context, maximisingPlayer, nodeHashes, depth, stopTime);
return res;
}
else
{
final int sign = (maximisingPlayer == mover)? 1 : -1 ;
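// Combine the parent state's heuristic score with each move's policy logit: the logit is centred
// on its running mean and rescaled (using the observed logit and heuristic ranges) to be
// commensurate with the heuristic scores, weighted by actionEvaluationWeight; the sign flips the
// bonus when the mover is not the maximising player.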
for (int i=0; i<numLegalMoves; i++)
{
double r = 1;
if (debugDisplay)
{
r = ThreadLocalRandom.current().nextDouble(); // just for occasional display
if (r < 0.05)
{
System.out.printf
(
"action score is %.6g and heuristicScore is %.6g ",
Float.valueOf(moveScores.get(i)),
Float.valueOf(heuristicScore)
);
}
}
// (*2 because the maximal gap with the mean is about half of the range)
float actionScore = (actionEvaluationWeight * (moveScores.get(i)-estimatedActionLogitMean) * sign * estimatedHeuristicScoresRange * 2) / estimatedActionLogitRange;
moveScores.set(i, heuristicScore + actionScore);
maxRegisteredValue = Math.max(heuristicScore + actionScore, maxRegisteredValue);
minRegisteredValue = Math.min(heuristicScore + actionScore, minRegisteredValue);
if (debugDisplay)
if (r < 0.05)
System.out.printf("-> eval is %.6g\n", Float.valueOf(moveScores.get(i)));
}
return moveScores;
}
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
super.initAI(game, playerID);
// Instantiate feature sets for selection policy
if (learnedSelectionPolicy != null)
learnedSelectionPolicy.initAI(game, playerID);
firstTurn = true;
actionLogitComputations = 0;
actionLogitSum = 0;
maxActionLogit = Float.NEGATIVE_INFINITY;
minActionLogit = Float.POSITIVE_INFINITY;
maxRegisteredValue = Float.NEGATIVE_INFINITY;
minRegisteredValue = Float.POSITIVE_INFINITY;
return;
}
@Override
public boolean supportsGame(final Game game)
{
if (game.isStochasticGame())
return false;
if (game.hiddenInformation())
return false;
if (game.hasSubgames()) // Can't properly init most heuristics
return false;
if (!(game.isAlternatingMoveGame()))
return false;
return ((game.metadata().ai().features() != null) || (game.metadata().ai().trainedFeatureTrees() != null));
}
//-------------------------------------------------------------------------
@Override
public double scoreToValueEst(final float score, final float alpha, final float beta)
{
if (score <= alpha+10)
return -1.0;
if (score >= beta-10)
return 1.0;
minRegisteredValue = Math.min(minRegisteredValue, minHeuristicEval);
maxRegisteredValue = Math.max(maxRegisteredValue, maxHeuristicEval);
// Map to range [-0.8, 0.8] based on most extreme heuristic evaluations
// observed so far.
return -0.8 + (0.8 - -0.8) * ((score - minRegisteredValue) / (maxRegisteredValue - minRegisteredValue));
}
//-------------------------------------------------------------------------
/**
* Sets the learned policy to use in Selection phase
* @param policy The policy.
*/
public void setLearnedSelectionPolicy(final SoftmaxPolicy policy)
{
learnedSelectionPolicy = policy;
}
/**
* Sets the weight of the action evaluation in the context evaluations.
* @param value the weight
*/
public static void setActionEvaluationWeight(final float value)
{
actionEvaluationWeight = value;
}
}
| 7,473 | 26.992509 | 169 | java |
Ludii | Ludii-master/AI/src/search/minimax/NaiveActionBasedSelection.java | package search.minimax;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import main.collections.FastArrayList;
import other.AI;
import other.context.Context;
import other.move.Move;
import policies.softmax.SoftmaxFromMetadataSelection;
import policies.softmax.SoftmaxPolicy;
import utils.data_structures.ScoredMove;
/**
* Naive AI that just picks the most promising action according to its learned selection policy
* based on actions, with no exploration.
*
* @author cyprien
*/
public class NaiveActionBasedSelection extends AI
{
/** A learned policy to use in Selection phase */
protected SoftmaxPolicy learnedSelectionPolicy = null;
/** Current list of moves available in root */
protected FastArrayList<Move> currentRootMoves = null;
protected float selectionEpsilon = 0f;
protected int selectActionNbCalls = 0;
//-------------------------------------------------------------------------
/**
* Constructor:
*/
public NaiveActionBasedSelection ()
{
super();
friendlyName = "Naive Action Based Selection";
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
currentRootMoves = new FastArrayList<Move>(game.moves(context).moves());
Move selectedMove;
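// As implemented, selectionEpsilon is the probability of greedily picking the move with the
// highest policy logit; otherwise (and whenever no learned policy is available) a uniformly
// random legal move is chosen.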
if ((learnedSelectionPolicy != null) && (ThreadLocalRandom.current().nextDouble(1.) < selectionEpsilon))
{
final int numRootMoves = currentRootMoves.size();
final List<ScoredMove> consideredMoveIndices = new ArrayList<ScoredMove>(numRootMoves);
for (int i=0; i<numRootMoves; ++i)
{
final Move m = currentRootMoves.get(i);
final float actionValue = learnedSelectionPolicy.computeLogit(context,m);
consideredMoveIndices.add(new ScoredMove(m, actionValue, 1));
}
Collections.sort(consideredMoveIndices);
selectedMove = consideredMoveIndices.get(0).move;
}
else
{
final int r = ThreadLocalRandom.current().nextInt(currentRootMoves.size());
final Move move = currentRootMoves.get(r);
selectedMove = move;
}
selectActionNbCalls += 1;
return selectedMove;
}
//-------------------------------------------------------------------------
/**
* Initialising the AI (almost the same as with AlphaBeta)
*/
@Override
public void initAI(final Game game, final int playerID)
{
currentRootMoves = null;
// Instantiate feature sets for selection policy
if ((game.metadata().ai().features() != null) || (game.metadata().ai().trainedFeatureTrees() != null))
{
setLearnedSelectionPolicy(new SoftmaxFromMetadataSelection(0f));
learnedSelectionPolicy.initAI(game, playerID);
}
selectActionNbCalls = 0;
return;
}
@Override
public boolean supportsGame(final Game game)
{
if (game.isStochasticGame())
return false;
if (game.hiddenInformation())
return false;
return ((game.metadata().ai().features() != null) || (game.metadata().ai().trainedFeatureTrees() != null));
}
//-------------------------------------------------------------------------
/**
* Sets the learned policy to use in Selection phase
* @param policy The policy.
*/
public void setLearnedSelectionPolicy(final SoftmaxPolicy policy)
{
learnedSelectionPolicy = policy;
}
public void setSelectionEpsilon(final float eps)
{
selectionEpsilon = eps;
}
public int getSelectActionNbCalls()
{
return selectActionNbCalls;
}
}
| 3,641 | 23.608108 | 109 | java |
Ludii | Ludii-master/AI/src/search/minimax/UBFM.java | package search.minimax;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import gnu.trove.list.array.TLongArrayList;
import main.FileHandling;
import main.collections.FVector;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilityAdvanced;
import other.RankUtils;
import other.context.Context;
import other.context.TempContext;
import other.move.Move;
import other.state.State;
import other.trial.Trial;
import training.expert_iteration.ExItExperience;
import training.expert_iteration.ExItExperience.ExItExperienceState;
import training.expert_iteration.ExpertPolicy;
import utils.data_structures.ScoredMove;
import utils.data_structures.transposition_table.TranspositionTableUBFM;
import utils.data_structures.transposition_table.TranspositionTableUBFM.UBFMTTData;
/**
* Implementation of Unbounded Best-First Minimax
* (as described in Learning to Play Two-Player Perfect-Information Games
* without Knowledge by Quentin Cohen-Solal (2021))
* (chunks of code copied from AlphaBetaSearch, especially the variables initialisation)
*
* The implementation completely relies on the transposition table so it is not optional.
*
* Note that by default the transposition table is cleared each turn;
* if it were not, it would keep growing indefinitely.
*
* @author cyprien
*/
public class UBFM extends ExpertPolicy
{
/** Boolean to activate additional outputs for debugging */
public boolean debugDisplay = false;
/** Set to true to store a description of the search tree in the treeSaveFile.
* The file will contain a list where each item is a tuple representing a node in the form (nodeHashes, heuristicScore, player).
* nodeHashes is itself a tuple containing the sequence of the hash codes that leads to the node.
* heuristicScore is the evaluation of the state with the heuristics (unless the state is terminal)
*/
public boolean savingSearchTreeDescription = false;
public String treeSaveFile = "/home/cyprien/Documents/M1/Internship/search_trees_raw/default.sav"; //FIXME
/** If true, each exploration will be continued up to the end of the tree. */
protected boolean fullPlayouts = false;
/** Set to true to reset the TT after each move (usually only for tree search display) */
public boolean resetTTeachTurn = true;
/** Value of epsilon if a randomised policy is picked (default is epsilon-greedy) */
protected double selectionEpsilon = 0.1;
/**
* If set to an integer, the AI will always play for the same maximising player. This is useful when using
* the same AI to play for an opponent with the same transposition table
*/
protected Integer forcedMaximisingPlayer = null;
//-------------------------------------------------------------------------
/** A type for the selection policy */
public enum SelectionPolicy
{
BEST, // picks the move of the current principal path (the one with the best score)
SAFEST // variant to pick the move that was explored the most
}
/** Selection policy used: */
protected SelectionPolicy selectionPolicy = SelectionPolicy.SAFEST;
/** A type for the exploration policy */
public enum ExplorationPolicy
{
BEST, // always picks the move that seems the best
EPSILON_GREEDY, // with a probability epsilon, picks a uniformly random move, else picks the best
// to add: softmax
}
/** Exploration policy used: */
protected ExplorationPolicy explorationPolicy = ExplorationPolicy.EPSILON_GREEDY;
//-------------------------------------------------------------------------
/** Value we use to initialise alpha ("negative infinity", but not really) */
public static final float ALPHA_INIT = -1000000.f;
/** Value we use to initialise beta ("positive infinity", but not really) */
public static final float BETA_INIT = -ALPHA_INIT;
/** Score we give to winning opponents in paranoid searches in states where game is still going (> 2 players) */
public static final float PARANOID_OPP_WIN_SCORE = 10000.f;
/** We skip computing heuristics with absolute weight value lower than this */
public static final float ABS_HEURISTIC_WEIGHT_THRESHOLD = 0.001f;
//-------------------------------------------------------------------------
/** Our heuristic value function estimator */
protected Heuristics heuristicValueFunction = null;
/** If true, we read our heuristic function to use from game's metadata */
final boolean heuristicsFromMetadata;
/** We'll automatically return our move after at most this number of seconds if we only have one move */
protected final double autoPlaySeconds = 0.3;
/** Estimated score of the root node based on last-run search */
protected float estimatedRootScore = 0.f;
//-------------------------------------------------------------------------
/** The maximum heuristic eval we have ever observed */
protected float maxHeuristicEval = ALPHA_INIT;
/** The minimum heuristic eval we have ever observed */
protected float minHeuristicEval = BETA_INIT;
/** String to print to Analysis tab of the Ludii app */
protected String analysisReport = null;
/** Current list of moves available in root */
protected FastArrayList<Move> currentRootMoves = null;
/** The last move we returned. Need to memorise this for Expert Iteration with AlphaBeta */
protected Move lastReturnedMove = null;
/** Root context for which we've last performed a search */
protected Context lastSearchedRootContext = null;
/** Value estimates of moves available in root */
protected FVector rootValueEstimates = null;
/** The number of players in the game we're currently playing */
protected int numPlayersInGame = 0;
/** Needed for visualisations */
protected final float rootAlphaInit = ALPHA_INIT;
/** Needed for visualisations */
protected final float rootBetaInit = BETA_INIT;
/** Transposition Table */
protected TranspositionTableUBFM transpositionTable = null;
//-------------------------------------------------------------------------
/** Maximum depth of the analysis performed, for an analysis report */
protected int maxDepthReached;
/** Number of different states evaluated, for an analysis report */
protected int nbStatesEvaluated;
/** Scores of the moves from the root, for the final decision of the move to play */
protected float[] rootMovesScores;
/** numBitsPrimaryCode argument given when a TT is created (to avoid magic numbers in the code)*/
protected final int numBitsPrimaryCodeForTT = 12;
//-------------------------------------------------------------------------
public StringBuffer searchTreeOutput = new StringBuffer();
/** Number of calls of the recursive MinimaxBFS function since the last call of selectAction */
protected int callsOfMinimax = 0;
/** Sum of the entries in the TT at each turn, assuming the TT is reset each turn, for debug display */
protected int totalNumberOfEntriesTT;
//-------------------------------------------------------------------------
/**
* Creates a standard unbounded best-first minimax searcher.
* @return UBFM agent
*/
public static UBFM createUBFM()
{
return new UBFM();
}
//-------------------------------------------------------------------------
/**
* Constructor
*/
public UBFM()
{
friendlyName = "UBFM";
heuristicsFromMetadata = true;
}
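// Minimal (hypothetical) usage sketch, assuming a compiled Game, a Context and a player id are available:
//   final UBFM ai = new UBFM();
//   ai.initAI(game, playerID);
//   final Move move = ai.selectAction(game, context, 1.0, -1, -1);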
/**
* Constructor
* @param heuristicsFilepath
* @throws IOException
* @throws FileNotFoundException
*/
public UBFM(final String heuristicsFilepath) throws FileNotFoundException, IOException
{
final String heuristicsStr = FileHandling.loadTextContentsFromFile(heuristicsFilepath);
heuristicValueFunction = (Heuristics)compiler.Compiler.compileObject
(
heuristicsStr,
"metadata.ai.heuristics.Heuristics",
new Report()
);
heuristicsFromMetadata = false;
friendlyName = "UBFM";
}
/**
* Constructor
* @param heuristics
*/
public UBFM(final Heuristics heuristics)
{
heuristicValueFunction = heuristics;
heuristicsFromMetadata = false;
friendlyName = "UBFM";
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
maxDepthReached = 0;
nbStatesEvaluated = 0;
callsOfMinimax = 0;
lastReturnedMove = BFSSelection(game, context, (maxSeconds >= 0) ? maxSeconds : Double.MAX_VALUE, maxIterations);
return lastReturnedMove;
}
/**
* Decides the move to play from the root.
* @param rootTableData Transposition-table entry for the root node (holds the sorted scored moves)
* @param maximising True if the mover at the root is the maximising player
* @return The scored move chosen according to the selection policy (best or safest)
*/
protected ScoredMove finalDecision(final UBFMTTData rootTableData, boolean maximising)
{
switch (selectionPolicy)
{
case BEST:
return rootTableData.sortedScoredMoves.get(0);
case SAFEST:
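// "Safest" move = most visited root move; ties are broken by the best score for the mover.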
ScoredMove scoredMove;
if (debugDisplay)
{
System.out.print("sortedScoredMoves:\n(");
for (int i=0; i<rootTableData.sortedScoredMoves.size(); i++)
{
scoredMove = rootTableData.sortedScoredMoves.get(i);
System.out.print(Integer.toString(i)+": score "+Float.toString(scoredMove.score)+" ("+Integer.toString(scoredMove.nbVisits)+"); ");
}
System.out.println(")");
}
ScoredMove safestScoredMove = rootTableData.sortedScoredMoves.get(0);
for (int i=0; i<rootTableData.sortedScoredMoves.size(); i++)
{
scoredMove = rootTableData.sortedScoredMoves.get(i);
if
(
(scoredMove.nbVisits > safestScoredMove.nbVisits)
||
(
(scoredMove.nbVisits == safestScoredMove.nbVisits)
&&
(
( maximising && (scoredMove.score > safestScoredMove.score))
||
((!maximising) && (scoredMove.score < safestScoredMove.score))
)
)
)
{
safestScoredMove = scoredMove;
}
}
return safestScoredMove;
default:
System.err.println("Error: selectionPolicy not implemented");
return rootTableData.sortedScoredMoves.get(0);
}
}
/**
* Performs the unbounded best-first search algorithm.
* @param game Game being played
* @param context Root context to search from
* @param maxSeconds Maximum search time in seconds
* @param iterationLimit Maximum number of iterations (set to -1 for no limit)
* @return The move selected to play from the root context
*/
protected Move BFSSelection
(
final Game game,
final Context context,
final double maxSeconds,
final int iterationLimit
)
{
final long startTime = System.currentTimeMillis();
long stopTime = (maxSeconds > 0.0) ? startTime + (long) (maxSeconds * 1000) : Long.MAX_VALUE;
currentRootMoves = new FastArrayList<Move>(game.moves(context).moves());
final int numRootMoves = currentRootMoves.size();
final State state = context.state();
final int mover = state.playerToAgent(state.mover());
final int maximisingPlayer;
if (forcedMaximisingPlayer == null)
maximisingPlayer = context.state().playerToAgent(context.state().mover());
else
maximisingPlayer = forcedMaximisingPlayer.intValue();
if (!transpositionTable.isAllocated())
{
transpositionTable.allocate();
// For visualisation purpose:
// 0 is always a possible value because it is the value of a draw
minHeuristicEval = 0f;
maxHeuristicEval = 0f;
}
if (numRootMoves == 1)
{
// play faster if we only have one move available anyway
if (autoPlaySeconds >= 0.0 && autoPlaySeconds < maxSeconds)
stopTime = startTime + (long) (autoPlaySeconds * 1000);
}
// Vector for visualisation purposes
rootValueEstimates = new FVector(numRootMoves);
rootMovesScores = new float[numRootMoves];
// To output a visual graph of the search tree:
searchTreeOutput.setLength(0);
searchTreeOutput.append("[\n");
long zobrist = context.state().fullHash(context);
final Context contextCopy = copyContext(context);
TLongArrayList initialnodeHashes = new TLongArrayList();
initialnodeHashes.add(zobrist);
if (savingSearchTreeDescription)
searchTreeOutput.append("("+stringOfNodeHashes(initialnodeHashes)+","+Float.toString(getContextValue(context,maximisingPlayer,initialnodeHashes,0))+","+Integer.toString((mover==maximisingPlayer)? 1:2)+"),\n");
int iterationCount = 0;
final int maxNbIterations = (iterationLimit>0)? iterationLimit : Integer.MAX_VALUE;
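// Main loop: each iteration performs one best-first descent (following the most promising child
// at each node, subject to the exploration policy) and backs the evaluations up into the
// transposition table.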
while ((iterationCount == 0) || (System.currentTimeMillis() < stopTime && ( !wantsInterrupt) && (iterationCount < maxNbIterations)))
{
// Calling the recursive minimaxBFS:
float minimaxResult = minimaxBFS(contextCopy, maximisingPlayer, stopTime, 1, initialnodeHashes);
estimatedRootScore = (float) scoreToValueEst(minimaxResult, rootAlphaInit, rootBetaInit);
iterationCount += 1;
}
final UBFMTTData rootTableData = transpositionTable.retrieve(zobrist);
final ScoredMove finalChoice = finalDecision(rootTableData, mover==maximisingPlayer);
analysisReport = friendlyName + " (player " + maximisingPlayer + ") completed an analysis that reached at some point a depth of " + maxDepthReached + ":\n";
analysisReport += "best value observed at root "+Float.toString(finalChoice.score)+",\n";
analysisReport += Integer.toString(nbStatesEvaluated)+" different states were evaluated\n";
analysisReport +=
String.format
(
"%d iterations, with %d calls of minimax",
Integer.valueOf(iterationCount),
Integer.valueOf(callsOfMinimax)
);
if ((maxSeconds > 0.) && (System.currentTimeMillis()<stopTime))
analysisReport += " (finished analysis early) ";
if (debugDisplay)
{
int entriesTTthisTurn = transpositionTable.nbEntries(); //assuming TT is reset each turn
totalNumberOfEntriesTT += entriesTTthisTurn;
System.out.println("Nb of entries in the TT this turn: "+entriesTTthisTurn+" (total: "+totalNumberOfEntriesTT+")");
// transpositionTable.dispValueStats();
}
if (resetTTeachTurn)
{
transpositionTable.deallocate();
if (debugDisplay)
System.out.println("deallocated");
}
if (debugDisplay)
{
System.out.print("rootValueEstimates: (");
for (int i=0; i<currentRootMoves.size(); i++)
{
System.out.print(rootValueEstimates.get(i)+".");
}
System.out.println(")");
}
// To output a visual graph of the search tree:
searchTreeOutput.append("]");
if (savingSearchTreeDescription)
{
try
{
@SuppressWarnings("resource")
FileWriter myWriter = new FileWriter(treeSaveFile);
myWriter.write(searchTreeOutput.toString());
myWriter.close();
System.out.println("Successfully saved search tree in a file.");
}
catch (IOException e)
{
System.out.println("An error occurred.");
e.printStackTrace();
}
}
return finalChoice.move;
}
/**
* Recursive strategy to evaluate the different options on a possible line of actions.
*
* @param context Context to evaluate and explore from
* @param maximisingPlayer Index of the maximising player
* @param stopTime Time (in milliseconds, as given by System.currentTimeMillis()) at which the search must be aborted
* @param analysisDepth Depth of this call in the current descent (1 at the root)
* @param nodeHashes : list of hashes of states that lead to this state, used to avoid loops and when saving the search tree
* @return the score of the context
*/
protected float minimaxBFS
(
final Context context,
final int maximisingPlayer,
final long stopTime,
final int analysisDepth,
final TLongArrayList nodeHashes
)
{
final Trial trial = context.trial();
final State state = context.state();
final Game game = context.game();
final int mover = state.playerToAgent(state.mover());
final FastArrayList<Move> legalMoves = game.moves(context).moves();
final int numLegalMoves = legalMoves.size();
callsOfMinimax += 1;
if (analysisDepth > maxDepthReached)
maxDepthReached = analysisDepth;
/**
* First we check if the state is terminal (at least for maximising player).
* If so we can just return the value of the state for maximising player
*/
if (trial.over() || !context.active(maximisingPlayer))
return getContextValue(context, maximisingPlayer, nodeHashes, analysisDepth-1);
else if (savingSearchTreeDescription) // we call getContextValue to make sure the node is added to the search tree
getContextValue(context, maximisingPlayer, nodeHashes, analysisDepth);
List<ScoredMove> sortedScoredMoves = null;
final long zobrist = context.state().fullHash(context);
final UBFMTTData tableData = transpositionTable.retrieve(zobrist);
if (tableData != null)
if (tableData.sortedScoredMoves != null)
sortedScoredMoves = new ArrayList<ScoredMove>(tableData.sortedScoredMoves);
float outputScore = Float.NaN; // this value should always be replaced before it is read
if (sortedScoredMoves != null)
{
if (sortedScoredMoves.size() != numLegalMoves)
{
System.err.println("Error sortedScoredMoves.size() != numLegalMoves");
sortedScoredMoves = null;
}
}
boolean firstExploration = false;
if (sortedScoredMoves == null)
{
/**----------------------------------------------------------------
* In this case it is the first full analysis of this state.
* Thus we compute a quick evaluation of all the possible moves to order them before exploration.
*-----------------------------------------------------------------
*/
firstExploration = true;
final FVector moveScores = estimateMovesValues(legalMoves, context, maximisingPlayer, nodeHashes, analysisDepth, stopTime);
// Create a shuffled version of list of moves indices (random tie-breaking)
final FastArrayList<ScoredMove> tempScoredMoves = new FastArrayList<ScoredMove>(numLegalMoves);
for (int i=0; i<numLegalMoves; i++)
{
tempScoredMoves.add(new ScoredMove(legalMoves.get(i), moveScores.get(i), 1));
}
sortedScoredMoves = new ArrayList<ScoredMove>(numLegalMoves);
for (int i=0; i<numLegalMoves; i++)
{
sortedScoredMoves.add(tempScoredMoves.removeSwap(ThreadLocalRandom.current().nextInt(tempScoredMoves.size())));
}
if (mover == maximisingPlayer)
Collections.sort(sortedScoredMoves); //(the natural order of scored Move Indices is decreasing)
else
Collections.sort(sortedScoredMoves, Collections.reverseOrder());
if (analysisDepth==1)
{
for (int k=0; k<numLegalMoves; k++)
rootValueEstimates.set(k,(float) scoreToValueEst(moveScores.get(k), rootAlphaInit, rootBetaInit));
}
outputScore = sortedScoredMoves.get(0).score;
}
if ((!firstExploration) || (fullPlayouts && !wantsInterrupt && (System.currentTimeMillis() < stopTime)))
{
/**----------------------------------------------------------------
* If we already explored this state (or if fullPlayout is true), then we will recursively explore the most promising move
* at this state.
* ----------------------------------------------------------------
*/
final int indexPicked;
switch (explorationPolicy)
{
case BEST:
indexPicked = 0;
break;
case EPSILON_GREEDY:
if (ThreadLocalRandom.current().nextDouble(1.)<selectionEpsilon)
indexPicked = ThreadLocalRandom.current().nextInt(numLegalMoves);
else
indexPicked = 0;
break;
default:
throw new RuntimeException("Unknown exploration policy");
}
final Move bestMove = sortedScoredMoves.get(indexPicked).move;
final int previousNbVisits = sortedScoredMoves.get(indexPicked).nbVisits; //number of times this moves was already tried
final Context contextCopy = copyContext(context);
game.apply(contextCopy, bestMove);
final long newZobrist = contextCopy.state().fullHash(contextCopy);
final float scoreOfMostPromisingMove;
if ((nodeHashes.size() > 100) && (nodeHashes.contains(newZobrist)))
{
// security against infinite loops
// those should only happen if both players prefer avoiding the game to change,
// so we score this path as a draw
if (debugDisplay)
System.out.println("security against infinite loops activated ");
scoreOfMostPromisingMove = 0f;
}
else
{
nodeHashes.add(newZobrist);
/** Recursive call: */
scoreOfMostPromisingMove = minimaxBFS(contextCopy, maximisingPlayer, stopTime, analysisDepth+1, nodeHashes);
nodeHashes.removeAt(nodeHashes.size()-1);
}
// Re-inserting the new value in the list of scored moves, last among moves of equal values
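// (insertion-sort style: neighbouring entries are shifted over the picked move's old slot, in
// either direction, until the slot where the updated score fits is found, keeping the list
// sorted from the mover's perspective)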
int k = indexPicked; //TODO: put this in a function
while ((k < numLegalMoves-1))
{
if
(
((sortedScoredMoves.get(k+1).score >= scoreOfMostPromisingMove) && (mover == maximisingPlayer))
||
((sortedScoredMoves.get(k+1).score <= scoreOfMostPromisingMove) && (mover != maximisingPlayer))
)
{
sortedScoredMoves.set(k, sortedScoredMoves.get(k+1));
k += 1;
}
else
{
if (k > 0)
{
if
(
((sortedScoredMoves.get(k-1).score < scoreOfMostPromisingMove) && (mover == maximisingPlayer))
||
((sortedScoredMoves.get(k-1).score > scoreOfMostPromisingMove) && (mover != maximisingPlayer))
)
{
sortedScoredMoves.set(k, sortedScoredMoves.get(k-1));
k -= 1;
}
else
break;
}
else
break;
}
}
sortedScoredMoves.set(k, new ScoredMove(bestMove, scoreOfMostPromisingMove, previousNbVisits+1));
if (analysisDepth==1)
rootValueEstimates.set(currentRootMoves.indexOf(bestMove),(float) scoreToValueEst(scoreOfMostPromisingMove, rootAlphaInit, rootBetaInit));
outputScore = sortedScoredMoves.get(0).score;
}
// Updating the transposition table at each call:
transpositionTable.store(zobrist, outputScore, analysisDepth-1, TranspositionTableUBFM.EXACT_VALUE, sortedScoredMoves);
return outputScore;
}
/**
* Compute scores for the moves in argument
*
* @param legalMoves
* @param context
* @param maximisingPlayer
* @param nodeHashes : used when we want to output the tree-search graph
* @param depth : stored in the transposition table (but unused for the moment)
* @param stopTime
* @return a vector with the scores of the moves
*/
protected FVector estimateMovesValues
(
final FastArrayList<Move> legalMoves,
final Context context,
final int maximisingPlayer,
final TLongArrayList nodeHashes,
final int depth,
final long stopTime
)
{
final int numLegalMoves = legalMoves.size();
final Game game = context.game();
final State state = context.state();
final int mover = state.playerToAgent(state.mover());
final FVector moveScores = new FVector(numLegalMoves);
if (savingSearchTreeDescription)
getContextValue(context, maximisingPlayer, nodeHashes, depth-1); //to make sure we recorded the visit of the parent node
for (int i = 0; i < numLegalMoves; ++i)
{
final Move m = legalMoves.get(i);
final Context contextCopy = new TempContext(context);
game.apply(contextCopy, m);
nodeHashes.add(contextCopy.state().fullHash(contextCopy));
final float heuristicScore = getContextValue(contextCopy, maximisingPlayer, nodeHashes, depth);
nodeHashes.removeAt(nodeHashes.size()-1);
moveScores.set(i,heuristicScore);
			// If this process is taking too long, we abort and give the worst possible score (+-1) to the moves not yet evaluated:
if (System.currentTimeMillis() >= stopTime || ( wantsInterrupt))
{
for (int j=i+1; j<numLegalMoves; j++)
moveScores.set(j, mover == maximisingPlayer ? -BETA_INIT + 1 : BETA_INIT - 1);
break;
}
}
return moveScores;
}
/**
* Method to evaluate a state, with heuristics if the state is not terminal.
*
* @param context
* @param maximisingPlayer
	 * @param nodeHashes : used when we want to output the tree-search graph
* @param depth : stored in the transposition table (but unused for the moment)
* @return
*/
protected float getContextValue
(
final Context context,
final int maximisingPlayer,
final TLongArrayList nodeHashes,
final int depth // just used to fill the depth field in the TT which is not important
)
{
boolean valueRetrievedFromMemory = false;
float heuristicScore = 0;
final long zobrist = context.state().fullHash(context);
final State state = context.state();
final UBFMTTData tableData;
tableData = transpositionTable.retrieve(zobrist);
if (tableData != null)
{
// Already searched for data in TT, use results
switch(tableData.valueType)
{
case TranspositionTableUBFM.EXACT_VALUE:
heuristicScore = tableData.value;
valueRetrievedFromMemory = true;
break;
case TranspositionTableUBFM.INVALID_VALUE:
System.err.println("INVALID TRANSPOSITION TABLE DATA: INVALID VALUE");
break;
default:
// bounds are not used up to this point
System.err.println("INVALID TRANSPOSITION TABLE DATA: UNKNOWN");
break;
}
}
// Only compute heuristicScore if we didn't have a score registered in the TT
if (!valueRetrievedFromMemory) {
if (context.trial().over() || !context.active(maximisingPlayer))
{
// terminal node (at least for maximising player)
heuristicScore = (float) RankUtils.agentUtilities(context)[maximisingPlayer] * BETA_INIT;
}
else {
heuristicScore = heuristicValueFunction().computeValue(
context, maximisingPlayer, ABS_HEURISTIC_WEIGHT_THRESHOLD);
for (final int opp : opponents(maximisingPlayer))
{
if (context.active(opp))
heuristicScore -= heuristicValueFunction().computeValue(context, opp, ABS_HEURISTIC_WEIGHT_THRESHOLD);
else if (context.winners().contains(opp))
heuristicScore -= PARANOID_OPP_WIN_SCORE;
}
minHeuristicEval = Math.min(minHeuristicEval, heuristicScore);
maxHeuristicEval = Math.max(maxHeuristicEval, heuristicScore);
}
// Every time a state is evaluated, we store the value in the transposition table (worth?)
transpositionTable.store(zobrist, heuristicScore, depth, TranspositionTableUBFM.EXACT_VALUE, null);
// Invert scores if players swapped (to check)
if (context.state().playerToAgent(maximisingPlayer) != maximisingPlayer)
heuristicScore = -heuristicScore;
nbStatesEvaluated += 1;
}
if (savingSearchTreeDescription)
{
final int newMover = state.playerToAgent(state.mover());
searchTreeOutput.append("("+stringOfNodeHashes(nodeHashes)+","+Float.toString(heuristicScore)+","+((newMover==maximisingPlayer)? 1:2)+"),\n");
}
return heuristicScore;
}
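	// Summary of the evaluation above (descriptive only): a state that is terminal (at least for the
	// maximising player) scores that player's ranking utility scaled by BETA_INIT; a non-terminal state
	// scores heuristic(maximisingPlayer) minus the sum of heuristic(opp) over active opponents, with an
	// extra PARANOID_OPP_WIN_SCORE penalty per inactive opponent that is among the winners.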
//-------------------------------------------------------------------------
/**
* Converts a score into a value estimate in [-1, 1]. Useful for visualisations.
*
* @param score
* @param alpha
* @param beta
* @return Value estimate in [-1, 1] from unbounded (heuristic) score.
*/
public double scoreToValueEst(final float score, final float alpha, final float beta)
{
if (score <= alpha+10)
return -1.0;
if (score >= beta-10)
return 1.0;
// Map to range [-0.8, 0.8] based on most extreme heuristic evaluations
// observed so far.
return -0.8 + (0.8 - -0.8) * ((score - minHeuristicEval) / (maxHeuristicEval - minHeuristicEval));
}
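	// Worked example (illustrative values only): with alpha = -80, beta = 80, minHeuristicEval = -4
	// and maxHeuristicEval = 6, a score of 1 maps to -0.8 + 1.6 * ((1 - -4) / (6 - -4)) = 0.0,
	// while any score <= -70 returns -1.0 and any score >= 70 returns 1.0.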
//-------------------------------------------------------------------------
/**
* @param player
* @return Opponents of given player
*/
public int[] opponents(final int player)
{
final int[] opponents = new int[numPlayersInGame - 1];
int idx = 0;
for (int p = 1; p <= numPlayersInGame; ++p)
{
if (p != player)
opponents[idx++] = p;
}
return opponents;
}
/**
* Initialising the AI (almost the same as with AlphaBeta)
*/
@Override
public void initAI(final Game game, final int playerID)
{
if (heuristicsFromMetadata)
{
if (debugDisplay) System.out.println("Reading heuristics from game metadata...");
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicValueFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicValueFunction = new Heuristics(new HeuristicTerm[]{
new Material(null, Float.valueOf(1.f), null, null),
new MobilityAdvanced(null, Float.valueOf(0.001f))
});
}
}
if (heuristicValueFunction() != null)
heuristicValueFunction().init(game);
// reset these things used for visualisation purposes
estimatedRootScore = 0.f;
maxHeuristicEval = 0f;
minHeuristicEval = 0f;
analysisReport = null;
currentRootMoves = null;
rootValueEstimates = null;
totalNumberOfEntriesTT = 0;
// and these things for ExIt
lastSearchedRootContext = null; //always null, so useless?
lastReturnedMove = null;
numPlayersInGame = game.players().count();
transpositionTable = new TranspositionTableUBFM(numBitsPrimaryCodeForTT);
}
@Override
public boolean supportsGame(final Game game)
{
if (game.isStochasticGame())
return false;
if (game.hiddenInformation())
return false;
		if (game.hasSubgames()) // Can't properly init most heuristics
return false;
if (!(game.isAlternatingMoveGame()))
return false;
return true;
}
@Override
public double estimateValue()
{
return scoreToValueEst(estimatedRootScore, rootAlphaInit, rootBetaInit);
}
@Override
public String generateAnalysisReport()
{
return analysisReport;
}
@Override
public AIVisualisationData aiVisualisationData()
{
if (currentRootMoves == null || rootValueEstimates == null)
return null;
final FVector aiDistribution = rootValueEstimates.copy();
aiDistribution.subtract(aiDistribution.min());
return new AIVisualisationData(aiDistribution, rootValueEstimates, currentRootMoves);
}
public Heuristics heuristicValueFunction()
{
return heuristicValueFunction;
}
// ------------------------------------------------------------------------
@Override
public FastArrayList<Move> lastSearchRootMoves()
{
final FastArrayList<Move> moves = new FastArrayList<Move>(currentRootMoves.size());
for (final Move move : currentRootMoves)
{
moves.add(move);
}
return moves;
}
@Override
public FVector computeExpertPolicy(final double tau)
{
final FVector distribution = FVector.zeros(currentRootMoves.size());
distribution.set(currentRootMoves.indexOf(lastReturnedMove), 1.f);
distribution.softmax();
return distribution;
}
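	// Note on the distribution above (descriptive only): softmax of a one-hot vector is not one-hot;
	// with n legal moves the played move gets probability e / (e + n - 1) and every other move
	// 1 / (e + n - 1), e.g. roughly 0.40 and 0.15 each for n = 5.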
@Override
public List<ExItExperience> generateExItExperiences()
{
final FastArrayList<Move> actions = new FastArrayList<Move>(currentRootMoves.size());
for (int i = 0; i < currentRootMoves.size(); ++i)
{
final Move m = new Move(currentRootMoves.get(i));
m.setMover(currentRootMoves.get(i).mover());
m.then().clear(); // Can't serialise these, and won't need them
actions.add(m);
}
final ExItExperience experience =
new ExItExperience
(
new Context(lastSearchedRootContext),
new ExItExperienceState(lastSearchedRootContext),
actions,
computeExpertPolicy(1.0),
FVector.zeros(actions.size()),
1.f
);
return Arrays.asList(experience);
}
//-------------------------------------------------------------------------
/**
* Sets the selection policy used for the final decision of the move to play.
* @param s
*/
public void setSelectionPolicy(final SelectionPolicy s)
{
selectionPolicy = s;
}
/**
* Sets if we want the AI to fully explore one path at each iteration of the algorithm (Descent UBFM).
* @param b
*/
public void setIfFullPlayouts(final boolean b)
{
fullPlayouts = b;
}
/**
* Sets the epsilon value (randomisation parameter) of the best first search.
* @param value
*/
public void setSelectionEpsilon(final float value)
{
selectionEpsilon = value;
}
/**
* Sets if we want the Transposition Table to be reset at each call of selectAction.
* @param value
*/
public void setTTReset(final boolean value)
{
resetTTeachTurn = value;
}
public TranspositionTableUBFM getTranspositionTable()
{
return transpositionTable;
}
/**
	 * Sets if we want the maximisingPlayer to remain the same regardless of whose turn it is when selectAction is called.
	 * @param player
*/
public void forceAMaximisingPlayer(Integer player)
{
forcedMaximisingPlayer = player;
}
//-------------------------------------------------------------------------
public static String stringOfNodeHashes(TLongArrayList nodeHashes)
{
String res = "(";
for (int i=0; i<nodeHashes.size(); ++i)
{
res += Long.toString(nodeHashes.get(i));
res += ",";
}
res += ")";
return res;
}
}
| 32,896 | 30.300666 | 212 | java |
Ludii | Ludii-master/AI/src/search/minimax/UBFMKilothonContender.java | package search.minimax;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import gnu.trove.list.array.TLongArrayList;
import metadata.ai.heuristics.Heuristics;
import metadata.ai.heuristics.terms.HeuristicTerm;
import metadata.ai.heuristics.terms.Material;
import metadata.ai.heuristics.terms.MobilityAdvanced;
import metadata.ai.heuristics.terms.NullHeuristic;
import other.AI;
import other.context.Context;
import other.context.TempContext;
import other.move.Move;
import policies.softmax.SoftmaxFromMetadataSelection;
import policies.softmax.SoftmaxPolicy;
import search.mcts.MCTS;
import utils.data_structures.transposition_table.TranspositionTableUBFM;
public class UBFMKilothonContender extends UBFM
{
/** A learned policy to use in the selection phase */
protected SoftmaxPolicy learnedSelectionPolicy = null;
private boolean firstTurn;
private int totalNumberOfTurns;
private float avgNumberOfTurns;
private int nbTerminalPlayouts;
private int nbSelectActionCalls;
private float timeLeft;
private boolean stochasticGame = false;
private MCTS UCT_Helper; // for stochastic games
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
nbSelectActionCalls += 1;
if (firstTurn)
{
for (int i=0; i<10; i++)
{
Context contextCopy = new TempContext(context);
final List<NaiveActionBasedSelection> agents = new ArrayList<NaiveActionBasedSelection>();
agents.add(null);
final int nbPlayers = game.players().count();
for (int j=1; j<=nbPlayers; j++)
{
NaiveActionBasedSelection playoutAI = new NaiveActionBasedSelection();
playoutAI.initAI(game, j);
agents.add(playoutAI);
}
game.playout(contextCopy, new ArrayList<AI>(agents), 0.01, null, 0, 120, ThreadLocalRandom.current());
for (int j=1; j<=nbPlayers; j++)
{
totalNumberOfTurns += agents.get(j).getSelectActionNbCalls()*5;
// System.out.println("nb of turns for agent "+Integer.toString(j)+" in this playout is "+Integer.toString(agents.get(j).getSelectActionNbCalls()));
nbTerminalPlayouts += 5;
}
avgNumberOfTurns = ((float) totalNumberOfTurns)/nbTerminalPlayouts;
}
firstTurn = false;
}
float timeForDecision = timeLeft / Math.max(1f,1.5f*(1.5f*avgNumberOfTurns-nbSelectActionCalls));
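		// E.g. (illustrative numbers only) with timeLeft = 30s, avgNumberOfTurns = 40 and
		// nbSelectActionCalls = 10, the denominator is 1.5 * (1.5 * 40 - 10) = 75, so this
		// decision is budgeted 30 / 75 = 0.4 seconds.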
// System.out.println("Time allowed for this decision : "+Float.toString(timeForDecision));
final long startTime = System.currentTimeMillis();
Move selectedMove;
if (!stochasticGame)
selectedMove = super.selectAction(game, context, timeForDecision, maxIterations, maxDepth);
else
{
selectedMove = UCT_Helper.selectAction(game, context, timeForDecision, maxIterations, maxDepth);
// System.out.println("UCT dealing with this decision");
}
timeLeft -= Math.max(0, (System.currentTimeMillis()-startTime)/1000f );
// System.out.println("Time left : "+Float.toString(timeLeft));
return selectedMove;
}
/**
* Method to evaluate a state, with heuristics if the state is not terminal.
*
* @param context
* @param maximisingPlayer
* @param nodeHashes
* @param depth
* @return
*/
@Override
protected float getContextValue
(
final Context context,
final int maximisingPlayer,
final TLongArrayList nodeHashes,
final int depth // just used to fill the depth field in the TT which is not important
)
{
if (context.trial().over() || !context.active(maximisingPlayer))
{
// System.out.println("state met at depth "+Integer.toString(depth));
			// We want the latest information to carry more weight
totalNumberOfTurns += (depth+nbSelectActionCalls)*nbSelectActionCalls;
nbTerminalPlayouts += nbSelectActionCalls;
avgNumberOfTurns = ((float) totalNumberOfTurns)/nbTerminalPlayouts;
}
return super.getContextValue(context, maximisingPlayer, nodeHashes, depth);
}
/**
* Sets the learned policy to use in Selection phase
* @param policy The policy.
*/
public void setLearnedSelectionPolicy(final SoftmaxPolicy policy)
{
learnedSelectionPolicy = policy;
}
@Override
public boolean supportsGame(final Game game)
{
		// We assume this AI can try to do better than random on stochastic games
if (game.hiddenInformation())
return false;
		if (game.hasSubgames()) // Can't properly init most heuristics
return false;
if (!(game.isAlternatingMoveGame()))
return false;
return true;
}
/**
* Initialising the AI (almost the same as with AlphaBeta)
*/
@Override
public void initAI(final Game game, final int playerID)
{
// Fix the parameters:
setSelectionEpsilon(0.2f);
setTTReset(false);
setIfFullPlayouts(false);
savingSearchTreeDescription = false;
debugDisplay = false;
//Initialise new variables:
firstTurn = true;
totalNumberOfTurns = 0;
avgNumberOfTurns = 0f;
nbTerminalPlayouts = 0;
nbSelectActionCalls = 0;
timeLeft = 60f;
if (heuristicsFromMetadata)
{
if (debugDisplay) System.out.println("Reading heuristics from game metadata...");
// Read heuristics from game metadata
final metadata.ai.Ai aiMetadata = game.metadata().ai();
if (aiMetadata != null && aiMetadata.heuristics() != null)
{
heuristicValueFunction = Heuristics.copy(aiMetadata.heuristics());
}
else
{
// construct default heuristic
heuristicValueFunction = new Heuristics(new HeuristicTerm[]{
new Material(null, Float.valueOf(1.f), null, null),
new MobilityAdvanced(null, Float.valueOf(0.05f))
});
}
}
if ((game.metadata().ai().features() != null) || (game.metadata().ai().trainedFeatureTrees() != null))
{
setLearnedSelectionPolicy(new SoftmaxFromMetadataSelection(0.3f));
learnedSelectionPolicy.initAI(game, playerID);
}
if (heuristicValueFunction() != null)
{
try
{ heuristicValueFunction().init(game); }
catch (Exception e)
{
heuristicValueFunction = new Heuristics( new NullHeuristic());
heuristicValueFunction().init(game);
}
}
// reset these things used for visualisation purposes
estimatedRootScore = 0.f;
maxHeuristicEval = 0f;
minHeuristicEval = 0f;
analysisReport = null;
currentRootMoves = null;
rootValueEstimates = null;
if (game.isStochasticGame())
{
stochasticGame = true;
System.out.println("game is stochastic...");
UCT_Helper = MCTS.createUCT();
UCT_Helper.initAI(game, playerID);
}
// and these things for ExIt
lastSearchedRootContext = null; //always null, so useless?
lastReturnedMove = null;
numPlayersInGame = game.players().count();
transpositionTable = new TranspositionTableUBFM(numBitsPrimaryCodeForTT);
}
}
| 6,853 | 25.878431 | 152 | java |
Ludii | Ludii-master/AI/src/search/pns/PNSNode.java | package search.pns;
import java.util.Arrays;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import search.pns.ProofNumberSearch.ProofGoals;
/**
* Node for search trees in PNS
*
* @author Dennis Soemers
*/
public class PNSNode
{
//-------------------------------------------------------------------------
/**
* Nodes types in search trees in PNS
*
* @author Dennis Soemers
*/
public enum PNSNodeTypes
{
/** An OR node */
OR_NODE,
/** An AND node */
AND_NODE
}
/**
* Values of nodes in search trees in PNS
*
* @author Dennis Soemers
*/
public enum PNSNodeValues
{
/** A proven node */
TRUE,
/** A disproven node */
FALSE,
/** Unknown node (yet to prove or disprove) */
UNKNOWN
}
//-------------------------------------------------------------------------
/** Our parent node */
protected final PNSNode parent;
/** Our node type */
protected final PNSNodeTypes nodeType;
/** Context for this node (contains game state) */
protected final Context context;
/** Array of child nodes. */
protected final PNSNode[] children;
/** Array of legal moves in this node's state */
protected final Move[] legalMoves;
/** Whether we have expanded (generated child nodes) */
private boolean expanded = false;
/** Our proof number */
private int proofNumber = -1;
/** Our disproof number */
private int disproofNumber = -1;
/** Our node's value */
private PNSNodeValues value = PNSNodeValues.UNKNOWN;
//-------------------------------------------------------------------------
/**
* Constructor
*
* @param parent
* @param context
* @param proofGoal
* @param proofPlayer
*/
public PNSNode
(
final PNSNode parent,
final Context context,
final ProofGoals proofGoal,
final int proofPlayer
)
{
this.parent = parent;
this.context = context;
final int mover = context.state().mover();
if (mover == proofPlayer)
{
if (proofGoal == ProofGoals.PROVE_WIN)
nodeType = PNSNodeTypes.OR_NODE;
else
nodeType = PNSNodeTypes.AND_NODE;
}
else
{
if (proofGoal == ProofGoals.PROVE_WIN)
nodeType = PNSNodeTypes.AND_NODE;
else
nodeType = PNSNodeTypes.OR_NODE;
}
if (context.trial().over())
{
// we just created a node for a terminal game state,
// so create empty list of actions
legalMoves = new Move[0];
}
else
{
// non-terminal game state, so figure out list of actions we can
// still take
final FastArrayList<Move> actions = context.game().moves(context).moves();
legalMoves = new Move[actions.size()];
actions.toArray(legalMoves);
}
children = new PNSNode[legalMoves.length];
}
//-------------------------------------------------------------------------
/**
* @return Array of child nodes (contains null entries if not expanded)
*/
public PNSNode[] children()
{
return children;
}
/**
* @return Context in this node
*/
public Context context()
{
return context;
}
/**
* Deletes subtree below this node
*/
public void deleteSubtree()
{
Arrays.fill(children, null);
}
/**
* @return Our disproof number
*/
public int disproofNumber()
{
assert (disproofNumber >= 0);
return disproofNumber;
}
/**
* @return True if and only if this node has been expanded
*/
public boolean isExpanded()
{
return expanded;
}
/**
* @return Our node type
*/
public PNSNodeTypes nodeType()
{
return nodeType;
}
/**
* @return Our proof number
*/
public int proofNumber()
{
assert (proofNumber >= 0);
return proofNumber;
}
/**
* Sets our disproof number
* @param disproofNumber
*/
public void setDisproofNumber(final int disproofNumber)
{
this.disproofNumber = disproofNumber;
}
/**
* Sets whether or not we're expanded
* @param expanded
*/
public void setExpanded(final boolean expanded)
{
this.expanded = expanded;
}
/**
* Sets our proof number
* @param proofNumber
*/
public void setProofNumber(final int proofNumber)
{
this.proofNumber = proofNumber;
}
/**
* Sets our node's value
* @param value
*/
public void setValue(final PNSNodeValues value)
{
this.value = value;
}
/**
* @return Our node's value
*/
public PNSNodeValues value()
{
return value;
}
//-------------------------------------------------------------------------
}
| 4,528 | 17.63786 | 80 | java |
Ludii | Ludii-master/AI/src/search/pns/ProofNumberSearch.java | package search.pns;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import other.AI;
import other.context.Context;
import other.move.Move;
import search.pns.PNSNode.PNSNodeTypes;
import search.pns.PNSNode.PNSNodeValues;
/**
* Proof-number search.
*
* @author Dennis Soemers
*/
public class ProofNumberSearch extends AI
{
//-------------------------------------------------------------------------
/**
* Different goals that we can try to prove with PNS
*
* @author Dennis Soemers
*/
public enum ProofGoals
{
/** If we want to prove that a position is a win for current mover */
PROVE_WIN,
/** If we want to prove that a position is a loss for current mover */
PROVE_LOSS
}
//-------------------------------------------------------------------------
/** Our proof goal */
protected final ProofGoals proofGoal;
/** The player for which we aim to prove either a win or a loss */
protected int proofPlayer = -1;
/** The best possible rank we can get from the root state we're searching for */
protected double bestPossibleRank = -1.0;
/** The worst possible rank we can get from the root state we're searching for */
protected double worstPossibleRank = -1.0;
//-------------------------------------------------------------------------
/**
* Constructor
*/
public ProofNumberSearch()
{
this(ProofGoals.PROVE_WIN);
}
/**
* Constructor
*
* @param proofGoal
*/
public ProofNumberSearch(final ProofGoals proofGoal)
{
friendlyName = "Proof-Number Search";
this.proofGoal = proofGoal;
}
//-------------------------------------------------------------------------
@Override
public Move selectAction
(
final Game game,
final Context context,
final double maxSeconds,
final int maxIterations,
final int maxDepth
)
{
//System.out.println("WARNING! This does not yet appear to be correct in all cases!");
bestPossibleRank = context.computeNextWinRank();
worstPossibleRank = context.computeNextLossRank();
if (proofPlayer != context.state().mover())
{
System.err.println("Warning: Current mover = " + context.state().mover() + ", but proof player = " + proofPlayer + "!");
}
final PNSNode root = new PNSNode(null, copyContext(context), proofGoal, proofPlayer);
evaluate(root);
setProofAndDisproofNumbers(root);
PNSNode currentNode = root;
while (root.proofNumber() != 0 && root.disproofNumber() != 0)
{
final PNSNode mostProvingNode = selectMostProvingNode(currentNode);
expandNode(mostProvingNode);
currentNode = updateAncestors(mostProvingNode);
}
// System.out.println();
// System.out.println("Proof goal = " + proofGoal);
// System.out.println("Proof player = " + proofPlayer);
// System.out.println("Root type = " + root.nodeType());
// System.out.println("root pn = " + root.proofNumber());
// System.out.println("root dn = " + root.disproofNumber());
// for (int i = 0; i < root.children.length; ++i)
// {
// if (root.children[i] == null)
// {
// System.out.println("child " + i + " = null");
// }
// else
// {
// System.out.println("child " + i + " pn = " + root.children[i].proofNumber());
// System.out.println("child " + i + " dn = " + root.children[i].disproofNumber());
// }
// }
if (proofGoal == ProofGoals.PROVE_WIN)
{
if (root.proofNumber() == 0)
System.out.println("Proved a win!");
else
System.out.println("Disproved a win!");
}
else
{
if (root.proofNumber() == 0)
System.out.println("Proved a loss!");
else
System.out.println("Disproved a loss!");
}
return root.legalMoves[ThreadLocalRandom.current().nextInt(root.legalMoves.length)];
}
//-------------------------------------------------------------------------
/**
* Evaluates the given node
* @param node
*/
private void evaluate(final PNSNode node)
{
final Context context = node.context();
if (context.trial().over())
{
final double rank = context.trial().ranking()[proofPlayer];
if (rank == bestPossibleRank)
{
if (proofGoal == ProofGoals.PROVE_WIN)
node.setValue(PNSNodeValues.TRUE);
else
node.setValue(PNSNodeValues.FALSE);
}
else if (rank == worstPossibleRank)
{
if (proofGoal == ProofGoals.PROVE_WIN)
node.setValue(PNSNodeValues.FALSE);
else
node.setValue(PNSNodeValues.TRUE);
}
else
{
node.setValue(PNSNodeValues.FALSE);
}
}
else
{
node.setValue(PNSNodeValues.UNKNOWN);
}
}
/**
* Sets proof and disproof numbers for given node
* @param node
*/
private static void setProofAndDisproofNumbers(final PNSNode node)
{
if (node.isExpanded()) // internal node
{
if (node.nodeType() == PNSNodeTypes.AND_NODE)
{
node.setProofNumber(0);
node.setDisproofNumber(Integer.MAX_VALUE);
			for (final PNSNode child : node.children())
			{
				if (child == null)	// expandNode() may stop early, leaving unexpanded (null) children
					continue;
				if (node.proofNumber() == Integer.MAX_VALUE || child.proofNumber() == Integer.MAX_VALUE)
					node.setProofNumber(Integer.MAX_VALUE);
				else
					node.setProofNumber(node.proofNumber() + child.proofNumber());
				if (child.disproofNumber() < node.disproofNumber())
					node.setDisproofNumber(child.disproofNumber());
			}
}
else // OR node
{
node.setProofNumber(Integer.MAX_VALUE);
node.setDisproofNumber(0);
			for (final PNSNode child : node.children())
			{
				if (child == null)	// expandNode() may stop early, leaving unexpanded (null) children
					continue;
				if (node.disproofNumber() == Integer.MAX_VALUE || child.disproofNumber() == Integer.MAX_VALUE)
					node.setDisproofNumber(Integer.MAX_VALUE);
				else
					node.setDisproofNumber(node.disproofNumber() + child.disproofNumber());
				if (child.proofNumber() < node.proofNumber())
					node.setProofNumber(child.proofNumber());
			}
}
}
else // leaf node
{
switch(node.value())
{
case FALSE:
node.setProofNumber(Integer.MAX_VALUE);
node.setDisproofNumber(0);
break;
case TRUE:
node.setProofNumber(0);
node.setDisproofNumber(Integer.MAX_VALUE);
break;
case UNKNOWN:
// Init as described in 7.1 of
// "GAME-TREE SEARCH USING PROOF NUMBERS: HE FIRST TWENTY YEARS"
if (node.nodeType() == PNSNodeTypes.AND_NODE)
{
node.setProofNumber(Math.max(1, node.children.length));
node.setDisproofNumber(1);
}
else // OR node
{
node.setProofNumber(1);
node.setDisproofNumber(Math.max(1, node.children.length));
}
break;
}
}
}
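	// Worked example (illustrative numbers only): an expanded OR node whose children have
	// (proof, disproof) numbers (3, 2) and (1, 4) gets proof = min(3, 1) = 1 and disproof = 2 + 4 = 6;
	// an AND node with the same children gets proof = 3 + 1 = 4 and disproof = min(2, 4) = 2.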
/**
* @param inCurrentNode
* @return Most proving node in subtree rooted in given current node
*/
private static PNSNode selectMostProvingNode(final PNSNode inCurrentNode)
{
//System.out.println();
//System.out.println("starting");
PNSNode current = inCurrentNode;
while (current.isExpanded())
{
final PNSNode[] children = current.children();
int nextIdx = 0;
PNSNode next = children[nextIdx];
if (current.nodeType() == PNSNodeTypes.OR_NODE)
{
while (true)
{
if (next != null)
{
//System.out.println("next not null");
if (next.proofNumber() == current.proofNumber())
break;
}
++nextIdx;
if (nextIdx < children.length)
next = children[nextIdx];
else
break;
}
}
else // AND node
{
while (true)
{
if (next != null)
{
//System.out.println("next not null");
if (next.disproofNumber() == current.disproofNumber())
break;
}
++nextIdx;
if (nextIdx < children.length)
next = children[nextIdx];
else
break;
}
}
current = next;
}
return current;
}
/**
* Expands the given node
* @param node
*/
private void expandNode(final PNSNode node)
{
final PNSNode[] children = node.children();
for (int i = 0; i < children.length; ++i)
{
final Context newContext = new Context(node.context());
newContext.game().apply(newContext, node.legalMoves[i]);
final PNSNode child = new PNSNode(node, newContext, proofGoal, proofPlayer);
children[i] = child;
evaluate(child);
setProofAndDisproofNumbers(child);
if
(
(node.nodeType() == PNSNodeTypes.OR_NODE && child.proofNumber() == 0) ||
(node.nodeType() == PNSNodeTypes.AND_NODE && child.disproofNumber() == 0)
)
{
break;
}
}
node.setExpanded(true);
}
/**
* Updates proof and disproof numbers for all ancestors of given node
* @param inNode
* @return Node from which to search for next most proving node
*/
private static PNSNode updateAncestors(final PNSNode inNode)
{
PNSNode node = inNode;
do
{
final int oldProof = node.proofNumber();
final int oldDisproof = node.disproofNumber();
setProofAndDisproofNumbers(node);
if (node.proofNumber() == oldProof && node.disproofNumber() == oldDisproof)
{
// No change on the path
return node;
}
// Delete (dis)proved subtrees
if (node.proofNumber() == 0 || node.disproofNumber() == 0)
node.deleteSubtree();
if (node.parent == null)
return node;
node = node.parent;
}
while (true);
}
//-------------------------------------------------------------------------
@Override
public void initAI(final Game game, final int playerID)
{
proofPlayer = playerID;
}
@Override
public boolean supportsGame(final Game game)
{
if (game.players().count() != 2)
return false;
if (game.isStochasticGame())
return false;
if (game.hiddenInformation())
return false;
return game.isAlternatingMoveGame();
}
//-------------------------------------------------------------------------
}
| 9,664 | 22.923267 | 123 | java |
Ludii | Ludii-master/AI/src/training/ExperienceSample.java | package training;
import java.util.BitSet;
import features.FeatureVector;
import features.feature_sets.BaseFeatureSet;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.move.Move;
import other.state.State;
/**
* Abstract class for a sample of experience
*
* @author Dennis Soemers
*/
public abstract class ExperienceSample
{
//-------------------------------------------------------------------------
/**
* Should be implemented to (generate and) return feature vectors corresponding
* to the moves that were legal in this sample of experience. Can use the given
* feature set to generate them, but can also return already-cached ones.
*
* @param featureSet
* @return Feature vectors corresponding to this sample of experience
*/
public abstract FeatureVector[] generateFeatureVectors(final BaseFeatureSet featureSet);
/**
* Should be implemented to return an expert distribution over actions.
*
* @return Expert distribution over actions
*/
public abstract FVector expertDistribution();
/**
* @return Game state
*/
public abstract State gameState();
/**
* @return From-position, for features, from last decision move.
*/
public abstract int lastFromPos();
/**
* @return To-position, for features, from last decision move.
*/
public abstract int lastToPos();
/**
* @return List of legal moves
*/
public abstract FastArrayList<Move> moves();
/**
* @return BitSet of winning moves
*/
public abstract BitSet winningMoves();
/**
* @return BitSet of losing moves
*/
public abstract BitSet losingMoves();
/**
* @return BitSet of anti-defeating moves
*/
public abstract BitSet antiDefeatingMoves();
//-------------------------------------------------------------------------
}
| 1,808 | 22.493506 | 89 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/ExItExperience.java | package training.expert_iteration;
import java.io.Serializable;
import java.util.BitSet;
import features.FeatureVector;
import features.feature_sets.BaseFeatureSet;
import features.spatial.FeatureUtils;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.context.Context;
import other.move.Move;
import other.state.State;
import training.ExperienceSample;
/**
* A single sample of experience for Expert Iteration.
* Contains a trial, a list of actions, a distribution
* over those actions resulting from an MCTS search process,
* and value estimates per action as computed by MCTS.
*
* @author Dennis Soemers
*/
public class ExItExperience extends ExperienceSample implements Serializable
{
//-------------------------------------------------------------------------
/** */
private static final long serialVersionUID = 1L;
/** Context for which experience was generated (transient, not serialised) */
protected transient final Context context;
/** Game state (+ last decision move, wrapped in wrapper class) */
protected final ExItExperienceState state;
/** Legal actions in the game state */
protected final FastArrayList<Move> moves;
/** Distribution over actions computed by Expert (e.g. MCTS) */
protected final FVector expertDistribution;
/** Value estimates computed by Expert (e.g. MCTS) */
protected final FVector expertValueEstimates;
/** Feature vector for state (heuristic terms) */
protected FVector stateFeatureVector;
/** Duration of full episode in which this experience was generated */
protected int episodeDuration = -1;
/** Outcomes at the end of the game in which this experience occurred (one per agent) */
protected double[] playerOutcomes = null;
/** Which legal moves are winning moves? */
protected final BitSet winningMoves = new BitSet();
/** Which legal moves are losing moves? */
protected final BitSet losingMoves = new BitSet();
/** Which legal moves are anti-defeating moves? */
protected final BitSet antiDefeatingMoves = new BitSet();
/** Importance sampling weight assigned to this sample by Prioritized Experience Replay */
protected float weightPER = -1.f;
/** Importance sampling weight for CE Explore */
protected float weightCEExplore = -1.f;
/** Importance sampling weight assigned to this sample based on tree search visit count */
protected final float weightVisitCount;
/** The index in replay buffer from which we sampled this if using PER */
protected int bufferIdx = -1;
//-------------------------------------------------------------------------
/**
* Constructor
* @param context
* @param state
* @param moves
* @param expertDistribution
* @param expertValueEstimates
* @param weightVisitCount
*/
public ExItExperience
(
final Context context,
final ExItExperienceState state,
final FastArrayList<Move> moves,
final FVector expertDistribution,
final FVector expertValueEstimates,
final float weightVisitCount
)
{
this.context = context;
this.state = state;
this.moves = moves;
this.expertDistribution = expertDistribution;
this.expertValueEstimates = expertValueEstimates;
this.weightVisitCount = weightVisitCount;
}
//-------------------------------------------------------------------------
/**
* @return Context
*/
public Context context()
{
return context;
}
/**
* @return state
*/
public ExItExperienceState state()
{
return state;
}
@Override
public FastArrayList<Move> moves()
{
return moves;
}
/**
* @return The index in replay buffer from which we sampled this if using PER
*/
public int bufferIdx()
{
return bufferIdx;
}
@Override
public FVector expertDistribution()
{
final FVector adjustedExpertDistribution = expertDistribution.copy();
if (!winningMoves.isEmpty() || !losingMoves.isEmpty() || !antiDefeatingMoves.isEmpty())
{
final float maxVal = adjustedExpertDistribution.max();
final float minVal = adjustedExpertDistribution.min();
// Put high (but less than winning) values on anti-defeating moves
for (int i = antiDefeatingMoves.nextSetBit(0); i >= 0; i = antiDefeatingMoves.nextSetBit(i + 1))
{
adjustedExpertDistribution.set(i, maxVal);
}
// Put large values on winning moves
for (int i = winningMoves.nextSetBit(0); i >= 0; i = winningMoves.nextSetBit(i + 1))
{
adjustedExpertDistribution.set(i, maxVal * 2.f);
}
// Put low values on losing moves
for (int i = losingMoves.nextSetBit(0); i >= 0; i = losingMoves.nextSetBit(i + 1))
{
adjustedExpertDistribution.set(i, minVal / 2.f);
}
// Re-normalise to probability distribution
adjustedExpertDistribution.normalise();
}
return adjustedExpertDistribution;
}
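	// Worked example (illustrative numbers only): for an expert distribution [0.5, 0.3, 0.2] where
	// move 0 is winning and move 2 is losing, move 0 becomes 2 * max = 1.0 and move 2 becomes
	// min / 2 = 0.1; renormalising [1.0, 0.3, 0.1] gives approximately [0.71, 0.21, 0.07].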
/**
* @return Value estimates computed by expert (MCTS)
*/
public FVector expertValueEstimates()
{
return expertValueEstimates;
}
/**
* @return Duration of full episode in which this experience was generated
*/
public int episodeDuration()
{
return episodeDuration;
}
/**
* @return Array of outcomes (one per player) of episode in which this experience was generated
*/
public double[] playerOutcomes()
{
return playerOutcomes;
}
/**
* Sets the index in replay buffer from which we sampled this if using PER
* @param bufferIdx
*/
public void setBufferIdx(final int bufferIdx)
{
this.bufferIdx = bufferIdx;
}
/**
* Sets the episode duration
* @param episodeDuration
*/
public void setEpisodeDuration(final int episodeDuration)
{
this.episodeDuration = episodeDuration;
}
/**
* Sets the per-player outcomes for the episode in which this experience was generated
* @param playerOutcomes
*/
public void setPlayerOutcomes(final double[] playerOutcomes)
{
this.playerOutcomes = playerOutcomes;
}
/**
* Sets our state-feature-vector (for state value functions)
* @param vector
*/
public void setStateFeatureVector(final FVector vector)
{
this.stateFeatureVector = vector;
}
/**
* Set which moves are winning moves
* @param winningMoves
*/
public void setWinningMoves(final BitSet winningMoves)
{
this.winningMoves.clear();
this.winningMoves.or(winningMoves);
}
/**
* Set which moves are losing moves
* @param losingMoves
*/
public void setLosingMoves(final BitSet losingMoves)
{
this.losingMoves.clear();
this.losingMoves.or(losingMoves);
}
/**
* Set which moves are anti-defeating moves
* @param antiDefeatingMoves
*/
public void setAntiDefeatingMoves(final BitSet antiDefeatingMoves)
{
this.antiDefeatingMoves.clear();
this.antiDefeatingMoves.or(antiDefeatingMoves);
}
/**
* Sets the importance sampling weight assigned to this sample by CE Explore
* @param weightCEExplore
*/
public void setWeightCEExplore(final float weightCEExplore)
{
this.weightCEExplore = weightCEExplore;
}
/**
* Sets the importance sampling weight assigned to this sample by Prioritized Experience Replay
* @param weightPER
*/
public void setWeightPER(final float weightPER)
{
this.weightPER = weightPER;
}
/**
* @return State feature vector
*/
public FVector stateFeatureVector()
{
return stateFeatureVector;
}
/**
* @return Importance sampling weight for CE exploration
*/
public float weightCEExplore()
{
return weightCEExplore;
}
/**
* @return Importance sampling weight assigned to this sample by Prioritized Experience Replay
*/
public float weightPER()
{
return weightPER;
}
/**
* @return Importance sampling weight assigned to this sampled based on tree search visit count
*/
public float weightVisitCount()
{
return weightVisitCount;
}
@Override
public State gameState()
{
return state.state();
}
@Override
public int lastFromPos()
{
return FeatureUtils.fromPos(state.lastDecisionMove());
}
@Override
public int lastToPos()
{
return FeatureUtils.toPos(state.lastDecisionMove());
}
@Override
public BitSet winningMoves()
{
return winningMoves;
}
@Override
public BitSet losingMoves()
{
return losingMoves;
}
@Override
public BitSet antiDefeatingMoves()
{
return antiDefeatingMoves;
}
//-------------------------------------------------------------------------
@Override
public FeatureVector[] generateFeatureVectors(final BaseFeatureSet featureSet)
{
return featureSet.computeFeatureVectors
(
state().state(),
state().lastDecisionMove(),
moves(),
false
);
}
//-------------------------------------------------------------------------
/**
* Wrapper class for game states in an ExIt experience buffer.
* Contains game state + last decision move (which we need access
* to for reactive features).
*
* @author Dennis Soemers
*/
public static final class ExItExperienceState implements Serializable
{
/** */
private static final long serialVersionUID = 1L;
/** Game state */
private final State state;
/** Last decision move */
private final Move lastDecisionMove;
/**
* Constructor
* @param context
*/
public ExItExperienceState(final Context context)
{
state = context.state();
lastDecisionMove = context.trial().lastMove();
}
/**
* @return Game state
*/
public State state()
{
return state;
}
/**
* @return Last decision move
*/
public Move lastDecisionMove()
{
return lastDecisionMove;
}
}
//-------------------------------------------------------------------------
}
| 9,551 | 22.016867 | 99 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/ExpertIteration.java | package training.expert_iteration;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import features.FeatureVector;
import features.WeightVector;
import features.feature_sets.BaseFeatureSet;
import features.feature_sets.network.JITSPatterNetFeatureSet;
import features.generation.AtomicFeatureGenerator;
import function_approx.BoostedLinearFunction;
import function_approx.LinearFunction;
import game.Game;
import gnu.trove.list.array.TDoubleArrayList;
import gnu.trove.list.array.TFloatArrayList;
import gnu.trove.list.array.TIntArrayList;
import gnu.trove.list.array.TLongArrayList;
import main.CommandLineArgParse;
import main.CommandLineArgParse.ArgOption;
import main.CommandLineArgParse.OptionTypes;
import main.FileHandling;
import main.StringRoutines;
import main.collections.FVector;
import main.collections.FastArrayList;
import main.grammar.Report;
import metadata.ai.heuristics.Heuristics;
import optimisers.Optimiser;
import optimisers.OptimiserFactory;
import other.GameLoader;
import other.RankUtils;
import other.context.Context;
import other.context.TempContext;
import other.move.Move;
import other.trial.Trial;
import policies.softmax.SoftmaxPolicyLinear;
import search.mcts.MCTS;
import training.expert_iteration.gradients.Gradients;
import training.expert_iteration.menageries.Menagerie;
import training.expert_iteration.menageries.Menagerie.DrawnAgentsData;
import training.expert_iteration.menageries.NaiveSelfPlay;
import training.expert_iteration.menageries.TournamentMenagerie;
import training.expert_iteration.params.AgentsParams;
import training.expert_iteration.params.FeatureDiscoveryParams;
import training.expert_iteration.params.GameParams;
import training.expert_iteration.params.ObjectiveParams;
import training.expert_iteration.params.OptimisersParams;
import training.expert_iteration.params.OutParams;
import training.expert_iteration.params.OutParams.CheckpointTypes;
import training.expert_iteration.params.TrainingParams;
import training.feature_discovery.CorrelationBasedExpander;
import training.feature_discovery.CorrelationErrorSignExpander;
import training.feature_discovery.FeatureSetExpander;
import training.feature_discovery.RandomExpander;
import training.feature_discovery.SpecialMovesCorrelationExpander;
import training.policy_gradients.Reinforce;
import utils.AIUtils;
import utils.ExperimentFileUtils;
import utils.ExponentialMovingAverage;
import utils.data_structures.experience_buffers.ExperienceBuffer;
import utils.data_structures.experience_buffers.PrioritizedReplayBuffer;
import utils.data_structures.experience_buffers.UniformExperienceBuffer;
import utils.experiments.InterruptableExperiment;
/**
* Implementation of the Expert Iteration self-play training framework,
* with additional support for feature learning instead of the standard
* DNNs (see our various papers).
*
* Currently, this is a sequential implementation, where experience generation
* and training are all performed on a single thread.
*
* @author Dennis Soemers
*/
public class ExpertIteration
{
//-------------------------------------------------------------------------
/** Format used for checkpoints based on training game count */
private static final String gameCheckpointFormat = "%s_%05d.%s";
/** Format used for checkpoints based on weight update count */
private static final String weightUpdateCheckpointFormat = "%s_%08d.%s";
//-------------------------------------------------------------------------
/** Game configuration */
protected final GameParams gameParams = new GameParams();
/** Agents configuration */
protected final AgentsParams agentsParams = new AgentsParams();
/** Basic training params */
protected final TrainingParams trainingParams = new TrainingParams();
/** Feature discovery params */
protected final FeatureDiscoveryParams featureDiscoveryParams = new FeatureDiscoveryParams();
/** Objective function(s) params */
protected final ObjectiveParams objectiveParams = new ObjectiveParams();
/** Optimiser(s) params */
protected final OptimisersParams optimisersParams = new OptimisersParams();
/** Output / file writing params */
protected final OutParams outParams = new OutParams();
/*
* Auxiliary experiment setup
*/
/**
* Whether to create a small GUI that can be used to manually interrupt training run.
* False by default.
*/
protected boolean useGUI;
/** Max wall time in minutes (or -1 for no limit) */
protected int maxWallTime;
//-------------------------------------------------------------------------
/**
* Constructor. No GUI for interrupting experiment, no wall time limit.
*/
public ExpertIteration()
{
// Do nothing
}
/**
* Constructor. No wall time limit.
* @param useGUI
*/
public ExpertIteration(final boolean useGUI)
{
this.useGUI = useGUI;
}
/**
* Constructor
* @param useGUI
* @param maxWallTime Wall time limit in minutes.
*/
public ExpertIteration(final boolean useGUI, final int maxWallTime)
{
this.useGUI = useGUI;
this.maxWallTime = maxWallTime;
}
//-------------------------------------------------------------------------
/**
* Starts the experiment
*/
public void startExperiment()
{
try (final PrintWriter logWriter = createLogWriter())
{
startTraining(logWriter);
}
}
//-------------------------------------------------------------------------
/**
* Start the training run
*/
private void startTraining(final PrintWriter logWriter)
{
final Game game;
if (gameParams.ruleset != null && !gameParams.ruleset.equals(""))
game = GameLoader.loadGameFromName(gameParams.gameName, gameParams.ruleset);
else
game = GameLoader.loadGameFromName(gameParams.gameName, gameParams.gameOptions);
final int numPlayers = game.players().count();
if (gameParams.gameLengthCap >= 0)
game.setMaxTurns(Math.min(gameParams.gameLengthCap, game.getMaxTurnLimit()));
@SuppressWarnings("unused")
final InterruptableExperiment experiment = new InterruptableExperiment(useGUI, maxWallTime)
{
//-----------------------------------------------------------------
/** Last checkpoint for which we've saved files */
protected long lastCheckpoint;
/** Filenames corresponding to our current Feature Sets */
protected String[] currentFeatureSetFilenames;
/** Filenames corresponding to our current policy weights optimised for Selection */
protected String[] currentPolicyWeightsSelectionFilenames;
/** Filenames corresponding to our current policy weights optimised for Playouts */
protected String[] currentPolicyWeightsPlayoutFilenames;
/** Filenames corresponding to our current policy weights optimised for TSPG */
protected String[] currentPolicyWeightsTSPGFilenames;
/** Filenames corresponding to our current policy weights optimised for CE exploration */
protected String[] currentPolicyWeightsCEEFilenames;
/** Filename for our current heuristics and weights for value function */
protected String currentValueFunctionFilename;
/** Filenames corresponding to our current experience buffers */
protected String[] currentExperienceBufferFilenames;
/** Filenames corresponding to our current experience buffers for special moves */
protected String[] currentSpecialMoveExperienceBufferFilenames;
/** Filenames corresponding to our current experience buffers for final states */
protected String[] currentFinalStatesExperienceBufferFilenames;
/** Filenames corresponding to our current moving average trackers of game durations */
protected String[] currentGameDurationTrackerFilenames;
/** Filenames corresponding to our current optimisers for Selection policy */
protected String[] currentOptimiserSelectionFilenames;
/** Filenames corresponding to our current optimisers for Playout policy */
protected String[] currentOptimiserPlayoutFilenames;
/** Filenames corresponding to our current optimisers for TSPG */
protected String[] currentOptimiserTSPGFilenames;
/** Filenames corresponding to our current optimisers for CE exploration */
protected String[] currentOptimiserCEEFilenames;
/** Filename corresponding to our current optimiser for Value function */
protected String currentOptimiserValueFilename;
/**
			 * Init class members. Can't do this in field declarations because
* runExperiment() is called inside constructor of parent class.
*/
private void initMembers()
{
lastCheckpoint = Long.MAX_VALUE;
currentFeatureSetFilenames = new String[numPlayers + 1];
currentPolicyWeightsSelectionFilenames = new String[numPlayers + 1];
currentPolicyWeightsPlayoutFilenames = new String[numPlayers + 1];
currentPolicyWeightsTSPGFilenames = new String[numPlayers + 1];
currentPolicyWeightsCEEFilenames = new String[numPlayers + 1];
currentValueFunctionFilename = null;
currentExperienceBufferFilenames = new String[numPlayers + 1];
currentSpecialMoveExperienceBufferFilenames = new String[numPlayers + 1];
currentFinalStatesExperienceBufferFilenames = new String[numPlayers + 1];
currentGameDurationTrackerFilenames = new String[numPlayers + 1];
currentOptimiserSelectionFilenames = new String[numPlayers + 1];
currentOptimiserPlayoutFilenames = new String[numPlayers + 1];
currentOptimiserTSPGFilenames = new String[numPlayers + 1];
currentOptimiserCEEFilenames = new String[numPlayers + 1];
currentOptimiserValueFilename = null;
}
//-----------------------------------------------------------------
@Override
public void runExperiment()
{
if (outParams.outDir == null)
System.err.println("Warning: we're not writing any output files for this run!");
else if (!outParams.outDir.exists())
outParams.outDir.mkdirs();
initMembers();
// Don't want our MCTSes to null undo data in Trial objects, we need that data sometimes
MCTS.NULL_UNDO_DATA = false;
// TODO add log statements describing complete setup of experiment
// Create menagerie
final Menagerie menagerie;
if (agentsParams.tournamentMode)
menagerie = new TournamentMenagerie();
else
menagerie = new NaiveSelfPlay();
// Prepare our feature sets
BaseFeatureSet[] featureSets = prepareFeatureSets();
// For every feature set, a list for every feature of its lifetime (how often it could've been active)
final TLongArrayList[] featureLifetimes = new TLongArrayList[featureSets.length];
// For every feature set, a list for every feature of the ratio of cases in which it was active
final TDoubleArrayList[] featureActiveRatios = new TDoubleArrayList[featureSets.length];
// For every feature set, a list for every feature of how frequently we observed it being active
final TLongArrayList[] featureOccurrences = new TLongArrayList[featureSets.length];
// For every feature set, a BitSet containing features that are (or could be) 100% winning moves
final BitSet[] winningMovesFeatures = new BitSet[featureSets.length];
// For every feature set, a BitSet containing features that are (or could be) 100% losing moves
final BitSet[] losingMovesFeatures = new BitSet[featureSets.length];
// For every feature set, a BitSet containing features that are (or could be) anti-moves for subsequent opponent moves that defeat us
// (like anti-decisive moves, but in games with more than 2 players the distinction between anti-opponent-winning vs. anti-us-losing
// is important)
final BitSet[] antiDefeatingMovesFeatures = new BitSet[featureSets.length];
for (int i = 0; i < featureSets.length; ++i)
{
if (featureSets[i] != null)
{
final TLongArrayList featureLifetimesList = new TLongArrayList();
featureLifetimesList.fill(0, featureSets[i].getNumSpatialFeatures(), 0L);
featureLifetimes[i] = featureLifetimesList;
final TDoubleArrayList featureActiveRatiosList = new TDoubleArrayList();
featureActiveRatiosList.fill(0, featureSets[i].getNumSpatialFeatures(), 0.0);
featureActiveRatios[i] = featureActiveRatiosList;
final TLongArrayList featureOccurrencesList = new TLongArrayList();
featureOccurrencesList.fill(0, featureSets[i].getNumSpatialFeatures(), 0L);
featureOccurrences[i] = featureOccurrencesList;
final BitSet winningMovesSet = new BitSet();
winningMovesSet.set(0, featureSets[i].getNumSpatialFeatures());
winningMovesFeatures[i] = winningMovesSet;
final BitSet losingMovesSet = new BitSet();
losingMovesSet.set(0, featureSets[i].getNumSpatialFeatures());
losingMovesFeatures[i] = losingMovesSet;
final BitSet antiDefeatingMovesSet = new BitSet();
antiDefeatingMovesSet.set(0, featureSets[i].getNumSpatialFeatures());
antiDefeatingMovesFeatures[i] = antiDefeatingMovesSet;
}
}
// prepare our linear functions
final LinearFunction[] selectionFunctions = prepareSelectionFunctions(featureSets);
final LinearFunction[] playoutFunctions = preparePlayoutFunctions(featureSets);
final LinearFunction[] tspgFunctions = prepareTSPGFunctions(featureSets, selectionFunctions);
// create our policies
final SoftmaxPolicyLinear selectionPolicy =
new SoftmaxPolicyLinear
(
selectionFunctions,
featureSets,
agentsParams.maxNumBiasedPlayoutActions
);
final SoftmaxPolicyLinear playoutPolicy =
new SoftmaxPolicyLinear
(
playoutFunctions,
featureSets,
agentsParams.maxNumBiasedPlayoutActions
);
final SoftmaxPolicyLinear tspgPolicy =
new SoftmaxPolicyLinear
(
tspgFunctions,
featureSets,
agentsParams.maxNumBiasedPlayoutActions
);
final FeatureSetExpander featureSetExpander;
switch (featureDiscoveryParams.expanderType)
{
case "CorrelationBasedExpander":
featureSetExpander = new CorrelationBasedExpander();
break;
case "CorrelationErrorSignExpander":
featureSetExpander = new CorrelationErrorSignExpander();
break;
case "Random":
featureSetExpander = new RandomExpander();
break;
default:
System.err.println("Did not recognise feature set expander type: " + featureDiscoveryParams.expanderType);
return;
}
final FeatureSetExpander specialMovesExpander = new SpecialMovesCorrelationExpander();
// create our value function
final Heuristics valueFunction = prepareValueFunction();
// construct optimisers
final Optimiser[] selectionOptimisers = prepareSelectionOptimisers();
final Optimiser[] playoutOptimisers = preparePlayoutOptimisers();
final Optimiser[] tspgOptimisers = prepareTSPGOptimisers();
final Optimiser valueFunctionOptimiser = prepareValueFunctionOptimiser();
// Initialise menagerie's population
menagerie.initialisePopulation
(
game, agentsParams,
AIUtils.generateFeaturesMetadata(selectionPolicy, playoutPolicy),
Heuristics.copy(valueFunction)
);
// instantiate trial / context
final Trial trial = new Trial(game);
final Context context = new Context(game, trial);
// Prepare our replay buffers (we use one per player)
final ExperienceBuffer[] experienceBuffers = prepareExperienceBuffers(trainingParams.prioritizedExperienceReplay);
final ExperienceBuffer[] specialMoveExperienceBuffers = prepareSpecialMoveExperienceBuffers();
// Keep track of average game duration (separate per player)
final ExponentialMovingAverage[] avgGameDurations = prepareGameDurationTrackers();
// Our big game-playing loop
long actionCounter = 0L;
long weightsUpdateCounter = (outParams.checkpointType == CheckpointTypes.WeightUpdate) ? lastCheckpoint : 0L;
int gameCounter = 0;
if (outParams.checkpointType == CheckpointTypes.Game && lastCheckpoint >= 0L)
{
gameCounter = (int)lastCheckpoint;
trainingParams.numTrainingGames += lastCheckpoint;
}
else if (trainingParams.numPolicyGradientEpochs > 0)
{
// Start out with some policy gradients
featureSets = Reinforce.runSelfPlayPG
(
game,
selectionPolicy,
playoutPolicy,
tspgPolicy,
featureSets,
featureSetExpander,
instantiateCrossEntropyOptimisers(),
objectiveParams,
featureDiscoveryParams,
trainingParams,
logWriter,
this
);
// Scale down all the policy weights obtained from PG
for (int i = 0; i < playoutPolicy.linearFunctions().length; ++i)
{
final LinearFunction linFunc = playoutPolicy.linearFunctions()[i];
if (linFunc == null)
continue;
final FVector weights = linFunc.trainableParams().allWeights();
weights.mult((float) trainingParams.postPGWeightScalar);
// Also copy the weights over into selection policy
selectionPolicy.linearFunctions()[i].trainableParams().allWeights().copyFrom(weights, 0, 0, weights.dim());
}
for (int p = 1; p <= numPlayers; ++p)
{
// Add new entries for lifetime, average activity, occurrences, and winning/losing/anti-defeating
winningMovesFeatures[p].set(featureActiveRatios[p].size(), featureSets[p].getNumSpatialFeatures());
losingMovesFeatures[p].set(featureActiveRatios[p].size(), featureSets[p].getNumSpatialFeatures());
antiDefeatingMovesFeatures[p].set(featureActiveRatios[p].size(), featureSets[p].getNumSpatialFeatures());
while (featureActiveRatios[p].size() < featureSets[p].getNumSpatialFeatures())
{
featureActiveRatios[p].add(0.0);
featureLifetimes[p].add(0L);
featureOccurrences[p].add(0L);
}
}
// // Reset all the weights (just keep features, untrained)
// for (final LinearFunction func : cePolicy.linearFunctions())
// {
// if (func != null)
// func.trainableParams().allWeights().mult(0.f);
// }
}
for (/**/; gameCounter < trainingParams.numTrainingGames; ++gameCounter)
{
checkWallTime(0.05);
if (interrupted) // time to abort the experiment
{
logLine(logWriter, "interrupting experiment...");
break;
}
final int thisIterationGameCounter = gameCounter;
saveCheckpoints
(
gameCounter,
weightsUpdateCounter,
featureSets,
selectionFunctions,
playoutFunctions,
tspgFunctions,
valueFunction,
experienceBuffers,
specialMoveExperienceBuffers,
selectionOptimisers,
tspgOptimisers,
valueFunctionOptimiser,
avgGameDurations,
featureOccurrences,
winningMovesFeatures,
losingMovesFeatures,
antiDefeatingMovesFeatures,
false
);
if
(
!featureDiscoveryParams.noGrowFeatureSet
&&
gameCounter > 0
&&
gameCounter % featureDiscoveryParams.addFeatureEvery == 0
)
{
final BaseFeatureSet[] expandedFeatureSets = new BaseFeatureSet[numPlayers + 1];
final ExecutorService threadPool = Executors.newFixedThreadPool(featureDiscoveryParams.numFeatureDiscoveryThreads);
final CountDownLatch latch = new CountDownLatch(numPlayers);
for (int pIdx = 1; pIdx <= numPlayers; ++pIdx)
{
final int p = pIdx;
final BaseFeatureSet featureSetP = featureSets[p];
threadPool.submit
(
() ->
{
try
{
// We'll sample a batch from our replay buffer, and grow feature set
final int batchSize = trainingParams.batchSize;
final List<ExItExperience> batch = experienceBuffers[p].sampleExperienceBatchUniformly(batchSize);
final List<ExItExperience> specialMovesBatch =
specialMoveExperienceBuffers[p].sampleExperienceBatchUniformly(batchSize);
final long startTime = System.currentTimeMillis();
final BaseFeatureSet expandedFeatureSet;
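// When the split-expander option is enabled, alternate per game between the regular expander
// (odd game counters) and the special-moves correlation expander (even game counters).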
if (!featureDiscoveryParams.useSpecialMovesExpanderSplit || thisIterationGameCounter % 2 != 0)
{
expandedFeatureSet =
expandFeatureSet
(
batch, featureSetExpander, featureSetP, p, selectionPolicy,
featureActiveRatios[p], featureLifetimes[p], featureOccurrences[p],
winningMovesFeatures[p], losingMovesFeatures[p], antiDefeatingMovesFeatures[p]
);
}
else
{
expandedFeatureSet =
expandFeatureSet
(
specialMovesBatch, specialMovesExpander, featureSetP, p, selectionPolicy,
featureActiveRatios[p], featureLifetimes[p], featureOccurrences[p],
winningMovesFeatures[p], losingMovesFeatures[p], antiDefeatingMovesFeatures[p]
);
}
// And try again to expand for special moves
final BaseFeatureSet toExpand = (expandedFeatureSet != null) ? expandedFeatureSet : featureSetP;
final BaseFeatureSet specialMovesExpandedFeatureSet;
if (featureDiscoveryParams.useSpecialMovesExpander)
{
specialMovesExpandedFeatureSet =
expandFeatureSet
(
specialMovesBatch, specialMovesExpander, toExpand, p, selectionPolicy,
featureActiveRatios[p], featureLifetimes[p], featureOccurrences[p],
winningMovesFeatures[p], losingMovesFeatures[p], antiDefeatingMovesFeatures[p]
);
}
else
{
specialMovesExpandedFeatureSet = null;
}
final BaseFeatureSet newFeatureSet;
if (specialMovesExpandedFeatureSet != null)
newFeatureSet = specialMovesExpandedFeatureSet;
else if (expandedFeatureSet != null)
newFeatureSet = expandedFeatureSet;
else
newFeatureSet = null;
if (newFeatureSet != null)
{
expandedFeatureSets[p] = newFeatureSet;
logLine
(
logWriter,
"Expanded feature set in " + (System.currentTimeMillis() - startTime) + " ms for P" + p + "."
);
//System.out.println("Expanded feature set in " + (System.currentTimeMillis() - startTime) + " ms for P" + p + ".");
}
else
{
expandedFeatureSets[p] = featureSetP;
}
}
catch (final Exception e)
{
e.printStackTrace();
}
finally
{
latch.countDown();
}
}
);
}
try
{
latch.await();
}
catch (final InterruptedException e)
{
e.printStackTrace();
}
threadPool.shutdown();
selectionPolicy.updateFeatureSets(expandedFeatureSets);
playoutPolicy.updateFeatureSets(expandedFeatureSets);
menagerie.updateDevFeatures(AIUtils.generateFeaturesMetadata(selectionPolicy, playoutPolicy));
if (objectiveParams.trainTSPG)
tspgPolicy.updateFeatureSets(expandedFeatureSets);
featureSets = expandedFeatureSets;
}
logLine(logWriter, "starting game " + (gameCounter + 1));
// play a game
game.start(context);
// here we'll collect all tuples of experience during this game
final List<List<ExItExperience>> gameExperienceSamples = new ArrayList<List<ExItExperience>>(numPlayers + 1);
gameExperienceSamples.add(null);
// Get agents from menagerie
final DrawnAgentsData drawnExperts = menagerie.drawAgents(game, agentsParams);
final List<ExpertPolicy> experts = drawnExperts.getAgents();
for (int p = 1; p < experts.size(); ++p)
{
if (experts.get(p) instanceof MCTS)
{
((MCTS)experts.get(p)).setNumThreads(agentsParams.numAgentThreads);
((MCTS)experts.get(p)).setUseScoreBounds(true);
}
experts.get(p).initAI(game, p);
gameExperienceSamples.add(new ArrayList<ExItExperience>());
if (objectiveParams.trainTSPG && !(experts.get(p) instanceof MCTS))
System.err.println("A non-MCTS expert cannot be used for training the TSPG objective!");
}
// init some stuff for CE exploration
double ceExploreCurrISWeight = 1.0;
final List<FVector> ceExploreGradientVectors = new ArrayList<FVector>();
final TIntArrayList ceExploreMovers = new TIntArrayList();
final TFloatArrayList ceExploreRewards = new TFloatArrayList();
while (!context.trial().over())
{
if (interrupted) // time to abort the experiment
{
logLine(logWriter, "interrupting experiment...");
break;
}
// have expert choose action
final int mover = context.state().mover();
final ExpertPolicy expert = experts.get(context.state().playerToAgent(mover));
expert.selectAction
(
game,
expert.copyContext(context),
agentsParams.thinkingTime,
agentsParams.iterationLimit,
agentsParams.depthLimit
);
final FastArrayList<Move> legalMoves = new FastArrayList<Move>();
for (final Move legalMove : expert.lastSearchRootMoves())
{
legalMoves.add(legalMove);
}
final FVector expertDistribution = expert.computeExpertPolicy(1.0);
final Move move = legalMoves.get(expertDistribution.sampleProportionally());
// Collect experiences for this game (don't store in buffer yet, don't know episode duration or value)
final List<ExItExperience> newExperiences = expert.generateExItExperiences();
for (final ExItExperience newExperience : newExperiences)
{
final int experienceMover = newExperience.state().state().mover();
if (valueFunction != null)
newExperience.setStateFeatureVector(valueFunction.computeStateFeatureVector(newExperience.context(), experienceMover));
// Update feature lifetimes, active ratios, winning/losing/anti-defeating features, etc.
updateFeatureActivityData
(
newExperience.context(), experienceMover, featureSets,
featureLifetimes, featureActiveRatios, featureOccurrences,
winningMovesFeatures, losingMovesFeatures, antiDefeatingMovesFeatures,
newExperience
);
gameExperienceSamples.get(experienceMover).add(newExperience);
}
// Apply chosen action
game.apply(context, move);
++actionCounter;
if (actionCounter % trainingParams.updateWeightsEvery == 0)
{
// Time to update our weights a bit (once for every player-specific model)
final int batchSize = trainingParams.batchSize;
for (int p = 1; p <= numPlayers; ++p)
{
final List<ExItExperience> batch = experienceBuffers[p].sampleExperienceBatch(batchSize);
if (batch.size() == 0)
continue;
final List<FVector> gradientsSelection = new ArrayList<FVector>(batch.size());
final List<FVector> gradientsPlayout = new ArrayList<FVector>(batch.size());
final List<FVector> gradientsTSPG = new ArrayList<FVector>(batch.size());
final List<FVector> gradientsCEExplore = new ArrayList<FVector>(batch.size());
final List<FVector> gradientsValueFunction = new ArrayList<FVector>(batch.size());
// for PER
final int[] indices = new int[batch.size()];
final float[] priorities = new float[batch.size()];
// for WIS
double sumImportanceSamplingWeights = 0.0;
for (int idx = 0; idx < batch.size(); ++idx)
{
final ExItExperience sample = batch.get(idx);
final FeatureVector[] featureVectors =
featureSets[p].computeFeatureVectors
(
sample.state().state(),
sample.state().lastDecisionMove(),
sample.moves(),
false
);
final FVector expertPolicy = sample.expertDistribution();
// Note: NOT using sample.state().state().mover(), but p here, important to update
// shared weights correctly!
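// The cross-entropy errors compare the apprentice policy's softmax distribution over the legal
// moves against the expert's distribution; below they are combined with the feature vectors to
// obtain parameter gradients for the linear functions.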
final FVector selectionErrors =
Gradients.computeCrossEntropyErrors
(
selectionPolicy, expertPolicy, featureVectors, p, objectiveParams.handleAliasing
);
final FVector playoutErrors =
Gradients.computeCrossEntropyErrors
(
playoutPolicy, expertPolicy, featureVectors, p, objectiveParams.handleAliasingPlayouts
);
final FVector selectionGradients = selectionPolicy.computeParamGradients
(
selectionErrors,
featureVectors,
p
);
final FVector playoutGradients = playoutPolicy.computeParamGradients
(
playoutErrors,
featureVectors,
p
);
final FVector valueGradients = Gradients.computeValueGradients(valueFunction, p, sample);
double importanceSamplingWeight = sample.weightVisitCount();
double nonImportanceSamplingWeight = 1.0; // Also used to scale gradients, but doesn't count as IS
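// Optionally scale the importance sampling weight by (average episode duration / this episode's
// duration), which downweights samples drawn from unusually long episodes.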
if (objectiveParams.importanceSamplingEpisodeDurations)
importanceSamplingWeight *= (avgGameDurations[sample.state().state().mover()].movingAvg() / sample.episodeDuration());
if (trainingParams.prioritizedExperienceReplay)
{
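// PER priorities are derived from the magnitude of the selection errors; weightPER is the
// importance sampling correction that compensates for the non-uniform sampling of the buffer.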
final FVector absErrors = selectionErrors.copy();
absErrors.abs();
// Minimum priority of 0.05 to avoid crashes with 0-error samples
priorities[idx] = Math.max(0.05f, absErrors.sum());
importanceSamplingWeight *= sample.weightPER();
indices[idx] = sample.bufferIdx();
}
sumImportanceSamplingWeights += importanceSamplingWeight;
selectionGradients.mult((float) (importanceSamplingWeight * nonImportanceSamplingWeight));
playoutGradients.mult((float) (importanceSamplingWeight * nonImportanceSamplingWeight));
gradientsSelection.add(selectionGradients);
gradientsPlayout.add(playoutGradients);
if (valueGradients != null)
{
valueGradients.mult((float) importanceSamplingWeight);
gradientsValueFunction.add(valueGradients);
}
if (objectiveParams.trainTSPG && p > 0)
{
// and gradients for TSPG
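// For a softmax over linear functions, d pi(a) / d theta_k = pi(a) * (phi_k(a) - sum_b pi(b) * phi_k(b)).
// The nested loops below accumulate expertQ(a) * d pi(a) / d theta, i.e. the gradient of the expected
// expert value estimate sum_a Q_expert(a) * pi(a), which is maximised further down.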
final FVector pi =
tspgPolicy.computeDistribution(featureVectors, sample.state().state().mover());
final FVector expertQs = sample.expertValueEstimates();
final FVector grads = new FVector(tspgFunctions[p].trainableParams().allWeights().dim());
for (int i = 0; i < sample.moves().size(); ++i)
{
final float expertQ = expertQs.get(i);
final float pi_sa = pi.get(i);
for (int j = 0; j < sample.moves().size(); ++j)
{
final FeatureVector featureVector = featureVectors[j];
// Dense representation for aspatial features
final FVector aspatialFeatureVals = featureVector.aspatialFeatureValues();
final int numAspatialFeatures = aspatialFeatureVals.dim();
for (int k = 0; k < numAspatialFeatures; ++k)
{
if (i == j)
grads.addToEntry(k, aspatialFeatureVals.get(k) * expertQ * pi_sa * (1.f - pi_sa));
else
grads.addToEntry(k, aspatialFeatureVals.get(k) * expertQ * pi_sa * (0.f - pi.get(j)));
}
// Sparse representation for spatial features (num aspatial features as offset for indexing)
final TIntArrayList sparseSpatialFeatures = featureVector.activeSpatialFeatureIndices();
for (int k = 0; k < sparseSpatialFeatures.size(); ++k)
{
final int feature = sparseSpatialFeatures.getQuick(k);
if (i == j)
grads.addToEntry(feature + numAspatialFeatures, expertQ * pi_sa * (1.f - pi_sa));
else
grads.addToEntry(feature + numAspatialFeatures, expertQ * pi_sa * (0.f - pi.get(j)));
}
}
}
grads.mult((float) (importanceSamplingWeight * nonImportanceSamplingWeight));
gradientsTSPG.add(grads);
}
}
final FVector meanGradientsSelection;
final FVector meanGradientsPlayout;
final FVector meanGradientsValue;
final FVector meanGradientsTSPG;
if (objectiveParams.weightedImportanceSampling)
{
// For WIS, we don't divide by number of vectors, but by sum of IS weights
meanGradientsSelection = Gradients.wisGradients(gradientsSelection, (float)sumImportanceSamplingWeights);
meanGradientsPlayout = Gradients.wisGradients(gradientsPlayout, (float)sumImportanceSamplingWeights);
meanGradientsValue = Gradients.wisGradients(gradientsValueFunction, (float)sumImportanceSamplingWeights);
meanGradientsTSPG = Gradients.wisGradients(gradientsTSPG, (float)sumImportanceSamplingWeights);
}
else
{
meanGradientsSelection = Gradients.meanGradients(gradientsSelection);
meanGradientsPlayout = Gradients.meanGradients(gradientsPlayout);
meanGradientsValue = Gradients.meanGradients(gradientsValueFunction);
meanGradientsTSPG = Gradients.meanGradients(gradientsTSPG);
}
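// One optimiser step per player-specific function; weightDecayLambda adds weight decay
// (L2-style regularisation) on top of the mean / WIS-normalised gradients.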
Gradients.minimise
(
selectionOptimisers[p],
selectionFunctions[p].trainableParams().allWeights(),
meanGradientsSelection,
(float)objectiveParams.weightDecayLambda
);
Gradients.minimise
(
playoutOptimisers[p],
playoutFunctions[p].trainableParams().allWeights(),
meanGradientsPlayout,
(float)objectiveParams.weightDecayLambda
);
menagerie.updateDevFeatures(AIUtils.generateFeaturesMetadata(selectionPolicy, playoutPolicy));
if (meanGradientsValue != null && valueFunction != null)
{
final FVector valueFunctionParams = valueFunction.paramsVector();
Gradients.minimise
(
valueFunctionOptimiser, // TODO don't we need separate one per player??????
valueFunctionParams,
meanGradientsValue,
(float)objectiveParams.weightDecayLambda
);
valueFunction.updateParams(game, valueFunctionParams, 0);
menagerie.updateDevHeuristics(Heuristics.copy(valueFunction));
}
if (objectiveParams.trainTSPG && p > 0)
{
// NOTE: maximise here instead of minimise!
Gradients.maximise
(
tspgOptimisers[p],
tspgFunctions[p].trainableParams().allWeights(),
meanGradientsTSPG,
(float)objectiveParams.weightDecayLambda
);
}
// update PER priorities
if (trainingParams.prioritizedExperienceReplay && p > 0)
{
final PrioritizedReplayBuffer buffer = (PrioritizedReplayBuffer) experienceBuffers[p];
buffer.setPriorities(indices, priorities);
}
}
++weightsUpdateCounter;
}
}
if (!interrupted)
{
// Game is over, we can now store all experience collected in the real buffers
for (int p = 1; p <= numPlayers; ++p)
{
final List<ExItExperience> pExperience = gameExperienceSamples.get(p);
// Note: not really game duration! Just from perspective of one player!
final int gameDuration = pExperience.size(); // NOTE: technically wrong for non-root experiences
avgGameDurations[p].observe(gameDuration);
// // For WED we want to weigh this observation proportionally to the value of the observation itself
// for (int i = 0; i < gameDuration; ++i)
// {
// avgGameDurations[p].observe(gameDuration);
// }
final double[] playerOutcomes = RankUtils.agentUtilities(context);
// Shuffle experiences so they're no longer in chronological order
Collections.shuffle(pExperience, ThreadLocalRandom.current());
for (final ExItExperience experience : pExperience)
{
experience.setEpisodeDuration(gameDuration);
experience.setPlayerOutcomes(playerOutcomes);
experienceBuffers[p].add(experience);
if
(
!experience.winningMoves().isEmpty()
||
!experience.losingMoves().isEmpty()
||
!experience.antiDefeatingMoves().isEmpty()
)
{
specialMoveExperienceBuffers[p].add(experience);
}
}
}
}
if (context.trial().over())
{
// Menagerie may want to know about the outcome
menagerie.updateOutcome(context, drawnExperts);
logLine(logWriter, "Finished running game " + (gameCounter + 1));
}
for (int p = 1; p < experts.size(); ++p)
{
experts.get(p).closeAI();
}
}
// Final forced save of checkpoints at end of run
saveCheckpoints
(
gameCounter + 1,
weightsUpdateCounter,
featureSets,
selectionFunctions,
playoutFunctions,
tspgFunctions,
valueFunction,
experienceBuffers,
specialMoveExperienceBuffers,
selectionOptimisers,
tspgOptimisers,
valueFunctionOptimiser,
avgGameDurations,
featureOccurrences,
winningMovesFeatures,
losingMovesFeatures,
antiDefeatingMovesFeatures,
true
);
final String menagerieLog = menagerie.generateLog();
if (menagerieLog != null)
logLine(logWriter, menagerieLog);
}
//-----------------------------------------------------------------
/**
* Updates data related to which features are active how often,
* their lifetimes, whether they are special types of moves
* (like 100% winning moves), etc.
*
* @param context
* @param mover
* @param featureSets
* @param featureLifetimes
* @param featureActiveRatios
* @param featureOccurrences
* @param winningMovesFeatures
* @param losingMovesFeatures
* @param antiDefeatingMovesFeatures
* @param experience
*/
private void updateFeatureActivityData
(
final Context context,
final int mover,
final BaseFeatureSet[] featureSets,
final TLongArrayList[] featureLifetimes,
final TDoubleArrayList[] featureActiveRatios,
final TLongArrayList[] featureOccurrences,
final BitSet[] winningMovesFeatures,
final BitSet[] losingMovesFeatures,
final BitSet[] antiDefeatingMovesFeatures,
final ExItExperience experience
)
{
final FastArrayList<Move> legalMoves = experience.moves();
final TIntArrayList[] sparseFeatureVectors =
featureSets[mover].computeSparseSpatialFeatureVectors(context, legalMoves, false);
for (final TIntArrayList featureVector : sparseFeatureVectors)
{
if (featureVector.isEmpty())
continue; // Probably a pass/swap/other special move, don't want these affecting our active ratios
// Following code expects the indices in the sparse feature vector to be sorted
featureVector.sort();
// Increase lifetime of all features by 1
featureLifetimes[mover].transformValues((final long l) -> {return l + 1L;});
// Incrementally update all average feature values
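// Standard online mean update: mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n, where x_n is 1 if the
// feature is active for this move and 0 otherwise, and n is the feature's current lifetime.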
final TDoubleArrayList list = featureActiveRatios[mover];
int vectorIdx = 0;
for (int i = 0; i < list.size(); ++i)
{
final double oldMean = list.getQuick(i);
if (vectorIdx < featureVector.size() && featureVector.getQuick(vectorIdx) == i)
{
// ith feature is active
list.setQuick(i, oldMean + ((1.0 - oldMean) / featureLifetimes[mover].getQuick(i)));
featureOccurrences[mover].setQuick(i, featureOccurrences[mover].getQuick(i) + 1);
++vectorIdx;
}
else
{
// ith feature is not active
list.setQuick(i, oldMean + ((0.0 - oldMean) / featureLifetimes[mover].getQuick(i)));
}
}
if (vectorIdx != featureVector.size())
{
System.err.println("ERROR: expected vectorIdx == featureVector.size()!");
System.err.println("vectorIdx = " + vectorIdx);
System.err.println("featureVector.size() = " + featureVector.size());
System.err.println("featureVector = " + featureVector);
}
}
// Compute which moves (if any) are winning, losing, or anti-defeating
final boolean[] isWinning = new boolean[legalMoves.size()];
final boolean[] isLosing = new boolean[legalMoves.size()];
final int[] numDefeatingResponses = new int[legalMoves.size()];
int maxNumDefeatingResponses = 0;
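// A move counts as "anti-defeating" if it leaves the opponent with strictly fewer responses that
// immediately defeat the mover than the worst legal move does (both computed below).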
// For every legal move, a BitSet-representation of which features are active
final BitSet[] activeFeatureBitSets = new BitSet[legalMoves.size()];
for (int i = 0; i < legalMoves.size(); ++i)
{
// Compute BitSet representation of active features
final TIntArrayList featureVector = sparseFeatureVectors[i];
activeFeatureBitSets[i] = new BitSet();
for (int j = featureVector.size() - 1; j >= 0; --j)
{
activeFeatureBitSets[i].set(featureVector.getQuick(j));
}
final Context contextCopy = new TempContext(context);
contextCopy.game().apply(contextCopy, legalMoves.get(i));
if (!contextCopy.active(mover))
{
if (contextCopy.winners().contains(mover))
isWinning[i] = true;
else if (contextCopy.losers().contains(mover))
isLosing[i] = true;
}
else if (contextCopy.state().mover() != mover) // Not interested in defeating responses if we're the mover again
{
final BitSet antiDefeatingActiveFeatures = (BitSet) activeFeatureBitSets[i].clone();
antiDefeatingActiveFeatures.and(antiDefeatingMovesFeatures[mover]);
final FastArrayList<Move> responses = contextCopy.game().moves(contextCopy).moves();
for (int j = 0; j < responses.size(); ++j)
{
final Context responseContextCopy = new TempContext(contextCopy);
responseContextCopy.game().apply(responseContextCopy, responses.get(j));
if (responseContextCopy.losers().contains(mover))
{
++numDefeatingResponses[i];
if (numDefeatingResponses[i] > maxNumDefeatingResponses)
maxNumDefeatingResponses = numDefeatingResponses[i];
}
}
}
else
{
numDefeatingResponses[i] = Integer.MAX_VALUE; // Accounting for moves that let us move again gets too complicated
}
}
final BitSet winningFeatures = winningMovesFeatures[mover];
final BitSet losingFeatures = losingMovesFeatures[mover];
final BitSet antiDefeatingFeatures = antiDefeatingMovesFeatures[mover];
final BitSet winningMoves = new BitSet();
final BitSet losingMoves = new BitSet();
final BitSet antiDefeatingMoves = new BitSet();
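// A feature only stays flagged as "winning" (resp. "losing") if every move it has ever been active
// on was winning (resp. losing); the andNot calls below clear the flag as soon as a counterexample
// is seen. The same intersection logic is applied to anti-defeating features.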
for (int i = legalMoves.size() - 1; i >= 0; --i)
{
if (!isWinning[i])
winningFeatures.andNot(activeFeatureBitSets[i]);
else
winningMoves.set(i);
if (!isLosing[i])
losingFeatures.andNot(activeFeatureBitSets[i]);
else
losingMoves.set(i);
if (numDefeatingResponses[i] >= maxNumDefeatingResponses)
antiDefeatingFeatures.andNot(activeFeatureBitSets[i]);
else
antiDefeatingMoves.set(i);
}
// if (maxNumDefeatingResponses != 0)
// System.out.println("numDefeatingResponses = " + Arrays.toString(numDefeatingResponses));
experience.setWinningMoves(winningMoves);
experience.setLosingMoves(losingMoves);
experience.setAntiDefeatingMoves(antiDefeatingMoves);
}
/**
* Try to expand a feature set.
* @param batch
* @param expander
* @param featureSet
* @param p
* @param policy
* @param featureActiveRatios
* @param featureLifetimes
* @param featureOccurrences
* @param winningMovesFeatures
* @param losingMovesFeatures
* @param antiDefeatingMovesFeatures
* @return Expanded version of feature set, or null if failed
*/
private BaseFeatureSet expandFeatureSet
(
final List<ExItExperience> batch,
final FeatureSetExpander expander,
final BaseFeatureSet featureSet,
final int p,
final SoftmaxPolicyLinear policy,
final TDoubleArrayList featureActiveRatios,
final TLongArrayList featureLifetimes,
final TLongArrayList featureOccurrences,
final BitSet winningMovesFeatures,
final BitSet losingMovesFeatures,
final BitSet antiDefeatingMovesFeatures
)
{
if (batch.size() > 0)
{
final BaseFeatureSet expandedFeatureSet =
expander.expandFeatureSet
(
batch,
featureSet,
policy,
game,
featureDiscoveryParams.combiningFeatureInstanceThreshold,
objectiveParams,
featureDiscoveryParams,
featureActiveRatios,
logWriter,
this
);
if (expandedFeatureSet != null)
{
expandedFeatureSet.init(game, new int[]{p}, null);
if (featureActiveRatios.size() < expandedFeatureSet.getNumSpatialFeatures())
{
// Add new entries for lifetime, average activity, occurrences, and winning/losing/anti-defeating
winningMovesFeatures.set(featureActiveRatios.size(), expandedFeatureSet.getNumSpatialFeatures());
losingMovesFeatures.set(featureActiveRatios.size(), expandedFeatureSet.getNumSpatialFeatures());
antiDefeatingMovesFeatures.set(featureActiveRatios.size(), expandedFeatureSet.getNumSpatialFeatures());
while (featureActiveRatios.size() < expandedFeatureSet.getNumSpatialFeatures())
{
featureActiveRatios.add(0.0);
featureLifetimes.add(0L);
featureOccurrences.add(0L);
}
}
}
// Previously cached feature sets likely useless / less useful now, so clear cache
JITSPatterNetFeatureSet.clearFeatureSetCache();
return expandedFeatureSet;
}
return null;
}
/**
* Creates (or loads) optimisers for Selection (one per player)
*
* @return
*/
private Optimiser[] prepareSelectionOptimisers()
{
final Optimiser[] optimisers = new Optimiser[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
Optimiser optimiser = null;
currentOptimiserSelectionFilenames[p] = getFilenameLastCheckpoint("OptimiserSelection_P" + p, "opt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentOptimiserSelectionFilenames[p], "OptimiserSelection_P" + p, "opt")
);
//System.out.println("CE opt set lastCheckpoint = " + lastCheckpoint);
if (currentOptimiserSelectionFilenames[p] == null)
{
// create new optimiser
optimiser = OptimiserFactory.createOptimiser(optimisersParams.selectionOptimiserConfig);
logLine(logWriter, "starting with new optimiser for Selection phase");
}
else
{
// load optimiser from file
try
(
final ObjectInputStream reader =
new ObjectInputStream(new BufferedInputStream(new FileInputStream(
outParams.outDir.getAbsolutePath() + File.separator + currentOptimiserSelectionFilenames[p]
)))
)
{
optimiser = (Optimiser) reader.readObject();
}
catch (final IOException | ClassNotFoundException e)
{
e.printStackTrace();
}
logLine(logWriter, "continuing with Selection optimiser loaded from " + currentOptimiserSelectionFilenames[p]);
}
optimisers[p] = optimiser;
}
return optimisers;
}
/**
* Creates (or loads) optimisers for Playout (one per player)
*
* @return
*/
private Optimiser[] preparePlayoutOptimisers()
{
final Optimiser[] optimisers = new Optimiser[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
Optimiser optimiser = null;
currentOptimiserPlayoutFilenames[p] = getFilenameLastCheckpoint("OptimiserPlayout_P" + p, "opt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentOptimiserPlayoutFilenames[p], "OptimiserPlayout_P" + p, "opt")
);
//System.out.println("CE opt set lastCheckpoint = " + lastCheckpoint);
if (currentOptimiserPlayoutFilenames[p] == null)
{
// create new optimiser
optimiser = OptimiserFactory.createOptimiser(optimisersParams.playoutOptimiserConfig);
logLine(logWriter, "starting with new optimiser for Playout phase");
}
else
{
// load optimiser from file
try
(
final ObjectInputStream reader =
new ObjectInputStream(new BufferedInputStream(new FileInputStream(
outParams.outDir.getAbsolutePath() + File.separator + currentOptimiserPlayoutFilenames[p]
)))
)
{
optimiser = (Optimiser) reader.readObject();
}
catch (final IOException | ClassNotFoundException e)
{
e.printStackTrace();
}
logLine(logWriter, "continuing with Playout optimiser loaded from " + currentOptimiserPlayoutFilenames[p]);
}
optimisers[p] = optimiser;
}
return optimisers;
}
/**
* Creates new optimisers for CE (one per player)
*
* @return
*/
private Optimiser[] instantiateCrossEntropyOptimisers()
{
final Optimiser[] optimisers = new Optimiser[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
optimisers[p] = OptimiserFactory.createOptimiser(optimisersParams.playoutOptimiserConfig);
}
return optimisers;
}
/**
* Creates (or loads) optimisers for TSPG (one per player)
*
* @return
*/
private Optimiser[] prepareTSPGOptimisers()
{
final Optimiser[] optimisers = new Optimiser[numPlayers + 1];
if (objectiveParams.trainTSPG)
{
for (int p = 1; p <= numPlayers; ++p)
{
Optimiser optimiser = null;
currentOptimiserTSPGFilenames[p] = getFilenameLastCheckpoint("OptimiserTSPG_P" + p, "opt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentOptimiserTSPGFilenames[p], "OptimiserTSPG_P" + p, "opt")
);
//System.out.println("TSPG opt set lastCheckpoint = " + lastCheckpoint);
if (currentOptimiserTSPGFilenames[p] == null)
{
// create new optimiser
optimiser = OptimiserFactory.createOptimiser(optimisersParams.tspgOptimiserConfig);
logLine(logWriter, "starting with new optimiser for TSPG");
}
else
{
// load optimiser from file
try
(
final ObjectInputStream reader =
new ObjectInputStream(new BufferedInputStream(new FileInputStream(
outParams.outDir.getAbsolutePath() + File.separator + currentOptimiserTSPGFilenames[p]
)))
)
{
optimiser = (Optimiser) reader.readObject();
}
catch (final IOException | ClassNotFoundException e)
{
e.printStackTrace();
}
logLine(logWriter, "continuing with TSPG optimiser loaded from " + currentOptimiserTSPGFilenames[p]);
}
optimisers[p] = optimiser;
}
}
return optimisers;
}
/**
* Creates (or loads) optimisers for CEE (one per player)
*
* @return
*/
private Optimiser[] prepareCEExploreOptimisers()
{
final Optimiser[] optimisers = new Optimiser[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
Optimiser optimiser = null;
currentOptimiserCEEFilenames[p] = getFilenameLastCheckpoint("OptimiserCEE_P" + p, "opt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentOptimiserCEEFilenames[p], "OptimiserCEE_P" + p, "opt")
);
//System.out.println("CEE opt set lastCheckpoint = " + lastCheckpoint);
if (currentOptimiserCEEFilenames[p] == null)
{
// create new optimiser
optimiser = OptimiserFactory.createOptimiser(optimisersParams.ceExploreOptimiserConfig);
logLine(logWriter, "starting with new optimiser for CEE");
}
else
{
// load optimiser from file
try
(
final ObjectInputStream reader =
new ObjectInputStream(new BufferedInputStream(new FileInputStream(
outParams.outDir.getAbsolutePath() + File.separator + currentOptimiserCEEFilenames[p]
)))
)
{
optimiser = (Optimiser) reader.readObject();
}
catch (final IOException | ClassNotFoundException e)
{
e.printStackTrace();
}
logLine(logWriter, "continuing with CEE optimiser loaded from " + currentOptimiserCEEFilenames[p]);
}
optimisers[p] = optimiser;
}
return optimisers;
}
/**
* Creates (or loads) optimiser for Value function (one shared for all players)
*
* @return
*/
private Optimiser prepareValueFunctionOptimiser()
{
Optimiser optimiser = null;
currentOptimiserValueFilename = getFilenameLastCheckpoint("OptimiserValue", "opt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentOptimiserValueFilename, "OptimiserValue", "opt")
);
if (currentOptimiserValueFilename == null)
{
// create new optimiser
optimiser = OptimiserFactory.createOptimiser(optimisersParams.valueOptimiserConfig);
logLine(logWriter, "starting with new optimiser for Value function");
}
else
{
// load optimiser from file
try
(
final ObjectInputStream reader =
new ObjectInputStream(new BufferedInputStream(new FileInputStream(
outParams.outDir.getAbsolutePath() + File.separator + currentOptimiserValueFilename
)))
)
{
optimiser = (Optimiser) reader.readObject();
}
catch (final IOException | ClassNotFoundException e)
{
e.printStackTrace();
}
logLine(logWriter, "continuing with Value function optimiser loaded from " + currentOptimiserValueFilename);
}
return optimiser;
}
/**
* Creates (or loads) experience buffers (one per player)
*
* @param prio
* @return
*/
private ExperienceBuffer[] prepareExperienceBuffers(final boolean prio)
{
final ExperienceBuffer[] experienceBuffers = new ExperienceBuffer[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
final ExperienceBuffer experienceBuffer;
currentExperienceBufferFilenames[p] = getFilenameLastCheckpoint("ExperienceBuffer_P" + p, "buf");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentExperienceBufferFilenames[p], "ExperienceBuffer_P" + p, "buf")
);
//System.out.println("Buffers set lastCheckpoint = " + lastCheckpoint);
if (currentExperienceBufferFilenames[p] == null)
{
// create new Experience Buffer
if (prio)
experienceBuffer = new PrioritizedReplayBuffer(trainingParams.experienceBufferSize);
else
experienceBuffer = new UniformExperienceBuffer(trainingParams.experienceBufferSize);
logLine(logWriter, "starting with empty experience buffer");
}
else
{
// load experience buffer from file
experienceBuffer =
prio
? PrioritizedReplayBuffer.fromFile(game, outParams.outDir.getAbsolutePath() + File.separator + currentExperienceBufferFilenames[p])
: UniformExperienceBuffer.fromFile(game, outParams.outDir.getAbsolutePath() + File.separator + currentExperienceBufferFilenames[p]);
logLine(logWriter, "continuing with experience buffer loaded from " + currentExperienceBufferFilenames[p]);
}
experienceBuffers[p] = experienceBuffer;
}
return experienceBuffers;
}
/**
* Creates (or loads) experience buffers (one per player) for special moves
* (winning, losing, and anti-defeating moves)
*
* @return
*/
private ExperienceBuffer[] prepareSpecialMoveExperienceBuffers()
{
final ExperienceBuffer[] experienceBuffers = new ExperienceBuffer[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
final ExperienceBuffer experienceBuffer;
currentSpecialMoveExperienceBufferFilenames[p] = getFilenameLastCheckpoint("SpecialMoveExperienceBuffer_P" + p, "buf");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentSpecialMoveExperienceBufferFilenames[p], "SpecialMoveExperienceBuffer_P" + p, "buf")
);
//System.out.println("Buffers set lastCheckpoint = " + lastCheckpoint);
if (currentSpecialMoveExperienceBufferFilenames[p] == null)
{
// create new Experience Buffer
experienceBuffer = new UniformExperienceBuffer(trainingParams.experienceBufferSize);
logLine(logWriter, "starting with empty experience buffer for special moves");
}
else
{
// load experience buffer from file
experienceBuffer =
UniformExperienceBuffer.fromFile(game, outParams.outDir.getAbsolutePath() + File.separator + currentSpecialMoveExperienceBufferFilenames[p]);
logLine(logWriter, "continuing with experience buffer for special moves loaded from " + currentSpecialMoveExperienceBufferFilenames[p]);
}
experienceBuffers[p] = experienceBuffer;
}
return experienceBuffers;
}
/**
* Creates (or loads) experience buffers for final states (one per player)
*
* @return
*/
private ExperienceBuffer[] prepareFinalStatesExperienceBuffers()
{
final ExperienceBuffer[] experienceBuffers = new ExperienceBuffer[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
final ExperienceBuffer experienceBuffer;
currentFinalStatesExperienceBufferFilenames[p] = getFilenameLastCheckpoint("FinalStatesExperienceBuffer_P" + p, "buf");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentFinalStatesExperienceBufferFilenames[p], "FinalStatesExperienceBuffer_P" + p, "buf")
);
if (currentFinalStatesExperienceBufferFilenames[p] == null)
{
// create new Experience Buffer
experienceBuffer = new UniformExperienceBuffer(trainingParams.experienceBufferSize);
logLine(logWriter, "starting with empty final states experience buffer");
}
else
{
// load experience buffer from file
experienceBuffer =
UniformExperienceBuffer.fromFile(game, outParams.outDir.getAbsolutePath() + File.separator + currentFinalStatesExperienceBufferFilenames[p]);
logLine(logWriter, "continuing with final states experience buffer loaded from " + currentFinalStatesExperienceBufferFilenames[p]);
}
experienceBuffers[p] = experienceBuffer;
}
return experienceBuffers;
}
/**
* Creates (or loads) trackers for average game duration (one per player)
*
* @return
*/
private ExponentialMovingAverage[] prepareGameDurationTrackers()
{
final ExponentialMovingAverage[] trackers = new ExponentialMovingAverage[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
ExponentialMovingAverage tracker = null;
currentGameDurationTrackerFilenames[p] = getFilenameLastCheckpoint("GameDurationTracker_P" + p, "bin");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentGameDurationTrackerFilenames[p], "GameDurationTracker_P" + p, "bin")
);
//System.out.println("Game dur trackers set lastCheckpoint = " + lastCheckpoint);
if (currentGameDurationTrackerFilenames[p] == null)
{
// Create new tracker
tracker = new ExponentialMovingAverage();
logLine(logWriter, "starting with new tracker for average game duration");
}
else
{
// load tracker from file
try
(
final ObjectInputStream reader =
new ObjectInputStream(new BufferedInputStream(new FileInputStream(
outParams.outDir.getAbsolutePath() + File.separator + currentGameDurationTrackerFilenames[p]
)))
)
{
tracker = (ExponentialMovingAverage) reader.readObject();
}
catch (final IOException | ClassNotFoundException e)
{
e.printStackTrace();
}
logLine(logWriter, "continuing with average game duration tracker loaded from " + currentGameDurationTrackerFilenames[p]);
}
trackers[p] = tracker;
}
return trackers;
}
/**
* Creates (or loads) linear functions (one per player) for Selection phase
* @param featureSets
* @return
*/
private LinearFunction[] prepareSelectionFunctions(final BaseFeatureSet[] featureSets)
{
final LinearFunction[] linearFunctions = new LinearFunction[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
final LinearFunction linearFunction;
currentPolicyWeightsSelectionFilenames[p] = getFilenameLastCheckpoint("PolicyWeightsSelection_P" + p, "txt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentPolicyWeightsSelectionFilenames[p], "PolicyWeightsSelection_P" + p, "txt")
);
//System.out.println("CE funcs set lastCheckpoint = " + lastCheckpoint);
if (currentPolicyWeightsSelectionFilenames[p] == null)
{
// Create new linear function
linearFunction = new LinearFunction(new WeightVector(new FVector(featureSets[p].getNumFeatures())));
logLine(logWriter, "starting with new 0-weights linear function for Selection phase");
}
else
{
// Load weights from file
linearFunction =
LinearFunction.fromFile(outParams.outDir.getAbsolutePath() + File.separator + currentPolicyWeightsSelectionFilenames[p]);
logLine(logWriter, "continuing with Selection policy weights loaded from " + currentPolicyWeightsSelectionFilenames[p]);
try
{
// make sure we're combining correct function with feature set
String featureSetFilepath =
new File(outParams.outDir.getAbsolutePath() + File.separator + currentPolicyWeightsSelectionFilenames[p]).getParent();
featureSetFilepath += File.separator + linearFunction.featureSetFile();
if
(
!new File(featureSetFilepath).getCanonicalPath().equals
(
new File(outParams.outDir.getAbsolutePath() + File.separator + currentFeatureSetFilenames[p]).getCanonicalPath()
)
)
{
System.err.println
(
"Warning: selection policy weights were saved for feature set " + featureSetFilepath
+ ", but we are now using " + currentFeatureSetFilenames[p]
);
}
}
catch (final IOException e)
{
e.printStackTrace();
}
}
linearFunctions[p] = linearFunction;
}
return linearFunctions;
}
/**
* Creates (or loads) linear functions (one per player) for Playout phase
* @param featureSets
* @return
*/
private LinearFunction[] preparePlayoutFunctions(final BaseFeatureSet[] featureSets)
{
final LinearFunction[] linearFunctions = new LinearFunction[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
final LinearFunction linearFunction;
currentPolicyWeightsPlayoutFilenames[p] = getFilenameLastCheckpoint("PolicyWeightsPlayout_P" + p, "txt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentPolicyWeightsPlayoutFilenames[p], "PolicyWeightsPlayout_P" + p, "txt")
);
//System.out.println("CE funcs set lastCheckpoint = " + lastCheckpoint);
if (currentPolicyWeightsPlayoutFilenames[p] == null)
{
// Create new linear function
linearFunction = new LinearFunction(new WeightVector(new FVector(featureSets[p].getNumFeatures())));
logLine(logWriter, "starting with new 0-weights linear function for Playout phase");
}
else
{
// Load weights from file
linearFunction =
LinearFunction.fromFile(outParams.outDir.getAbsolutePath() + File.separator + currentPolicyWeightsPlayoutFilenames[p]);
logLine(logWriter, "continuing with Playout policy weights loaded from " + currentPolicyWeightsPlayoutFilenames[p]);
try
{
// make sure we're combining correct function with feature set
String featureSetFilepath =
new File(outParams.outDir.getAbsolutePath() + File.separator + currentPolicyWeightsPlayoutFilenames[p]).getParent();
featureSetFilepath += File.separator + linearFunction.featureSetFile();
if
(
!new File(featureSetFilepath).getCanonicalPath().equals
(
new File(outParams.outDir.getAbsolutePath() + File.separator + currentFeatureSetFilenames[p]).getCanonicalPath()
)
)
{
System.err.println
(
"Warning: playout policy weights were saved for feature set " + featureSetFilepath
+ ", but we are now using " + currentFeatureSetFilenames[p]
);
}
}
catch (final IOException e)
{
e.printStackTrace();
}
}
linearFunctions[p] = linearFunction;
}
return linearFunctions;
}
/**
* Creates (or loads) linear functions (one per player) for the TSPG objective
* @param featureSets
* @param crossEntropyFunctions CE-trained functions used for boosting
* @return
*/
private LinearFunction[] prepareTSPGFunctions
(
final BaseFeatureSet[] featureSets,
final LinearFunction[] crossEntropyFunctions
)
{
final LinearFunction[] linearFunctions = new LinearFunction[numPlayers + 1];
if (objectiveParams.trainTSPG)
{
for (int p = 1; p <= numPlayers; ++p)
{
final LinearFunction linearFunction;
currentPolicyWeightsTSPGFilenames[p] = getFilenameLastCheckpoint("PolicyWeightsTSPG_P" + p, "txt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentPolicyWeightsTSPGFilenames[p], "PolicyWeightsTSPG_P" + p, "txt")
);
//System.out.println("TSPG funcs set lastCheckpoint = " + lastCheckpoint);
if (currentPolicyWeightsTSPGFilenames[p] == null)
{
// create new boosted linear function
linearFunction =
new BoostedLinearFunction
(
new WeightVector(new FVector(featureSets[p].getNumFeatures())),
crossEntropyFunctions[p]
);
logLine(logWriter, "starting with new 0-weights linear function for TSPG");
}
else
{
// load weights from file
linearFunction =
BoostedLinearFunction.boostedFromFile
(
outParams.outDir.getAbsolutePath() + File.separator + currentPolicyWeightsTSPGFilenames[p],
crossEntropyFunctions[p]
);
logLine(logWriter, "continuing with Selection policy weights loaded from " + currentPolicyWeightsTSPGFilenames[p]);
try
{
// make sure we're combining correct function with feature set
String featureSetFilepath =
new File(outParams.outDir.getAbsolutePath() + File.separator + currentPolicyWeightsTSPGFilenames[p]).getParent();
featureSetFilepath += File.separator + linearFunction.featureSetFile();
if
(
!new File(featureSetFilepath).getCanonicalPath().equals
(
new File(outParams.outDir.getAbsolutePath() + File.separator + currentFeatureSetFilenames[p]).getCanonicalPath()
)
)
{
System.err.println
(
"Warning: policy weights were saved for feature set " + featureSetFilepath
+ ", but we are now using " + currentFeatureSetFilenames[p]
);
}
}
catch (final IOException e)
{
e.printStackTrace();
}
}
linearFunctions[p] = linearFunction;
}
}
return linearFunctions;
}
/**
* Creates (or loads) value function
* @return
*/
private Heuristics prepareValueFunction()
{
if (objectiveParams.noValueLearning)
return null;
Heuristics valueFunction = null;
currentValueFunctionFilename = getFilenameLastCheckpoint("ValueFunction", "txt");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentValueFunctionFilename, "ValueFunction", "txt")
);
final Report report = new Report();
if (currentValueFunctionFilename == null)
{
Heuristics initHeuristics;
if ((initHeuristics = loadInitHeuristics()) != null)
{
valueFunction = initHeuristics;
valueFunction.init(game);
}
else if (agentsParams.bestAgentsDataDir != null)
{
// load heuristics from the best-agents-data dir
try
{
final String descr = FileHandling.loadTextContentsFromFile(agentsParams.bestAgentsDataDir + "/BestHeuristics.txt");
valueFunction = (Heuristics)compiler.Compiler.compileObject
(
descr,
"metadata.ai.heuristics.Heuristics",
report
);
valueFunction.init(game);
}
catch (final IOException e)
{
e.printStackTrace();
}
}
else
{
// copy value function from game metadata
valueFunction = Heuristics.copy(game.metadata().ai().heuristics());
valueFunction.init(game);
logLine(logWriter, "starting with new initial value function from .lud metadata");
}
}
else
{
// load value function from file
try
{
final String descr = FileHandling.loadTextContentsFromFile(
outParams.outDir.getAbsolutePath() + File.separator + currentValueFunctionFilename);
valueFunction = (Heuristics)compiler.Compiler.compileObject
(
descr,
"metadata.ai.heuristics.Heuristics",
report
);
valueFunction.init(game);
}
catch (final IOException e)
{
e.printStackTrace();
}
logLine
(
logWriter,
"continuing with value function from " +
outParams.outDir.getAbsolutePath() + File.separator + currentValueFunctionFilename
);
}
return valueFunction;
}
/**
* @return Heuristics specified as initial value function, or null if not specified / failed to load.
*/
private Heuristics loadInitHeuristics()
{
if (trainingParams.initValueFuncDir == null || trainingParams.initValueFuncDir.equals(""))
return null;
final File initHeuristicsDir = new File(trainingParams.initValueFuncDir);
if (initHeuristicsDir.exists() && initHeuristicsDir.isDirectory())
{
final File[] files = initHeuristicsDir.listFiles();
int latestGen = -1;
File latestGenFile = null;
for (final File file : files)
{
final String filename = file.getName();
if (filename.startsWith("results_") && filename.endsWith(".txt"))
{
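// Assumed filename format: results_<something>_<generation>.txt; the generation number is taken
// from the third underscore-separated token.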
final int gen =
Integer.parseInt
(
filename.split
(
java.util.regex.Pattern.quote("_")
)[2].replaceAll(java.util.regex.Pattern.quote(".txt"), "")
);
if (gen > latestGen)
{
latestGen = gen;
latestGenFile = file;
}
}
}
if (latestGenFile != null)
{
try
{
final String contents = FileHandling.loadTextContentsFromFile(latestGenFile.getAbsolutePath());
final String[] splitContents = contents.split(java.util.regex.Pattern.quote("\n"));
final List<String> topHeuristicLines = new ArrayList<String>();
// We skip first line, that's just a "-------------------------------" line
for (int i = 1; i < splitContents.length; ++i)
{
final String line = splitContents[i];
if (line.equals("-------------------------------"))
break; // We're done
topHeuristicLines.add(line);
}
// Remove final two lines: they're an empty line, and the top heuristic's score
topHeuristicLines.remove(topHeuristicLines.size() - 1);
topHeuristicLines.remove(topHeuristicLines.size() - 1);
// Compile heuristic
final Heuristics heuristic =
(Heuristics)compiler.Compiler.compileObject
(
StringRoutines.join("\n", topHeuristicLines),
"metadata.ai.heuristics.Heuristics",
new Report()
);
return heuristic;
}
catch (final IOException e)
{
e.printStackTrace();
}
}
}
return null;
}
/**
* Creates (or loads) feature sets (one per player, or a single shared one)
* @return
*/
private BaseFeatureSet[] prepareFeatureSets()
{
final BaseFeatureSet[] featureSets;
final TIntArrayList newlyCreated = new TIntArrayList();
featureSets = new BaseFeatureSet[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
final BaseFeatureSet featureSet;
currentFeatureSetFilenames[p] = getFilenameLastCheckpoint("FeatureSet_P" + p, "fs");
lastCheckpoint =
Math.min
(
lastCheckpoint,
extractCheckpointFromFilename(currentFeatureSetFilenames[p], "FeatureSet_P" + p, "fs")
);
//System.out.println("Feature sets set lastCheckpoint = " + lastCheckpoint);
if (currentFeatureSetFilenames[p] == null)
{
// create new Feature Set
final AtomicFeatureGenerator atomicFeatures = new AtomicFeatureGenerator(game, 2, 4);
featureSet = JITSPatterNetFeatureSet.construct(atomicFeatures.getAspatialFeatures(), atomicFeatures.getSpatialFeatures());
newlyCreated.add(p);
logLine(logWriter, "starting with new initial feature set for Player " + p);
logLine(logWriter, "num atomic features = " + featureSet.getNumSpatialFeatures());
}
else
{
// load feature set from file
featureSet = JITSPatterNetFeatureSet.construct(outParams.outDir.getAbsolutePath() + File.separator + currentFeatureSetFilenames[p]);
logLine
(
logWriter,
"continuing with feature set loaded from " +
outParams.outDir.getAbsolutePath() + File.separator + currentFeatureSetFilenames[p] +
" for Player " + p
);
}
if (featureSet.getNumSpatialFeatures() == 0)
{
System.err.println("ERROR: Feature Set has 0 features!");
logLine(logWriter, "Training with 0 features makes no sense, interrupting experiment.");
interrupted = true;
}
featureSet.init(game, new int[]{p}, null);
featureSets[p] = featureSet;
}
return featureSets;
}
//-----------------------------------------------------------------
/**
* @return When should the next checkpoint be?
*/
private long computeNextCheckpoint()
{
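// Checkpoint after every single unit (game or weight update) for the first 10 checkpoints,
// then only every outParams.checkpointFrequency units.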
if (lastCheckpoint < 0L)
return 0L;
else if (lastCheckpoint < 10)
return lastCheckpoint + 1;
else
return lastCheckpoint + outParams.checkpointFrequency;
}
/**
* Creates a filename for a given checkpoint
* @param baseFilename
* @param checkpoint
* @param extension
* @return
*/
private String createCheckpointFilename
(
final String baseFilename,
final long checkpoint,
final String extension
)
{
final String format = (outParams.checkpointType == CheckpointTypes.Game)
? gameCheckpointFormat : weightUpdateCheckpointFormat;
return String.format(format, baseFilename, Long.valueOf(checkpoint), extension);
}
/**
* @param baseFilename
* @param extension
* @return Checkpoint extracted from existing filename
*/
private int extractCheckpointFromFilename
(
final String filename,
final String baseFilename,
final String extension
)
{
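// E.g. (assuming a zero-padded checkpoint format such as "%s_%05d.%s") "FeatureSet_P1_00020.fs"
// with baseFilename "FeatureSet_P1" and extension "fs" yields 20.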
if (filename == null)
return -1;
final String checkpoint =
filename.substring
(
(baseFilename + "_").length(),
filename.length() - ("." + extension).length()
);
return Integer.parseInt(checkpoint);
}
/**
* Computes a filename for the last checkpoint
* @param baseFilename
* @param extension
* @return Computed filepath, or null if none saved yet
*/
private String getFilenameLastCheckpoint
(
final String baseFilename,
final String extension
)
{
if (outParams.outDir == null)
return null;
final String[] filenames = outParams.outDir.list();
int maxCheckpoint = -1;
for (final String filename : filenames)
{
if
(
filename.startsWith(baseFilename + "_") &&
filename.endsWith("." + extension)
)
{
final int checkpoint = extractCheckpointFromFilename(filename, baseFilename, extension);
if (checkpoint > maxCheckpoint)
maxCheckpoint = checkpoint;
}
}
if (maxCheckpoint < 0)
return null;
return createCheckpointFilename(baseFilename, maxCheckpoint, extension);
}
/**
* Saves checkpoints (if we want to or are forced to)
* @param gameCounter
* @param weightsUpdateCounter
* @param featureSets
* @param selectionFunctions
* @param playoutFunctions
* @param tspgFunctions
* @param valueFunction
* @param experienceBuffers
* @param specialMoveExperienceBuffers
* @param ceOptimisers
* @param tspgOptimisers
* @param valueFunctionOptimiser
* @param avgGameDurations
* @param featureOccurrences
* @param winningMovesFeatures
* @param losingMovesFeatures
* @param antiDefeatingMovesFeatures
* @param forced
*/
private void saveCheckpoints
(
final int gameCounter,
final long weightsUpdateCounter,
final BaseFeatureSet[] featureSets,
final LinearFunction[] selectionFunctions,
final LinearFunction[] playoutFunctions,
final LinearFunction[] tspgFunctions,
final Heuristics valueFunction,
final ExperienceBuffer[] experienceBuffers,
final ExperienceBuffer[] specialMoveExperienceBuffers,
final Optimiser[] ceOptimisers,
final Optimiser[] tspgOptimisers,
final Optimiser valueFunctionOptimiser,
final ExponentialMovingAverage[] avgGameDurations,
final TLongArrayList[] featureOccurrences,
final BitSet[] winningMovesFeatures,
final BitSet[] losingMovesFeatures,
final BitSet[] antiDefeatingMovesFeatures,
final boolean forced
)
{
if (outParams.outDir == null)
return;
long nextCheckpoint = computeNextCheckpoint();
if (outParams.checkpointType == CheckpointTypes.Game)
{
if (!forced && gameCounter < nextCheckpoint)
return;
else
nextCheckpoint = gameCounter;
}
else if (outParams.checkpointType == CheckpointTypes.WeightUpdate)
{
if (!forced && weightsUpdateCounter < nextCheckpoint)
return;
else
nextCheckpoint = weightsUpdateCounter;
}
for (int p = 1; p <= numPlayers; ++p)
{
// Save feature set
final String featureSetFilename = createCheckpointFilename("FeatureSet_P" + p, nextCheckpoint, "fs");
featureSets[p].toFile(outParams.outDir.getAbsolutePath() + File.separator + featureSetFilename);
currentFeatureSetFilenames[p] = featureSetFilename;
// Save Selection weights
final String selectionWeightsFilename = createCheckpointFilename("PolicyWeightsSelection_P" + p, nextCheckpoint, "txt");
selectionFunctions[p].writeToFile(
outParams.outDir.getAbsolutePath() + File.separator + selectionWeightsFilename, new String[]{currentFeatureSetFilenames[p]});
currentPolicyWeightsSelectionFilenames[p] = selectionWeightsFilename;
// Save Playout weights
final String playoutWeightsFilename = createCheckpointFilename("PolicyWeightsPlayout_P" + p, nextCheckpoint, "txt");
playoutFunctions[p].writeToFile(
outParams.outDir.getAbsolutePath() + File.separator + playoutWeightsFilename, new String[]{currentFeatureSetFilenames[p]});
currentPolicyWeightsPlayoutFilenames[p] = playoutWeightsFilename;
// Save TSPG weights
if (objectiveParams.trainTSPG)
{
final String tspgWeightsFilename = createCheckpointFilename("PolicyWeightsTSPG_P" + p, nextCheckpoint, "txt");
tspgFunctions[p].writeToFile(
outParams.outDir.getAbsolutePath() + File.separator + tspgWeightsFilename, new String[]{currentFeatureSetFilenames[p]});
currentPolicyWeightsTSPGFilenames[p] = tspgWeightsFilename;
}
if (valueFunction != null)
{
// save Value function
final String valueFunctionFilename = createCheckpointFilename("ValueFunction", nextCheckpoint, "txt");
valueFunction.toFile(game, outParams.outDir.getAbsolutePath() + File.separator + valueFunctionFilename);
}
if (forced)
{
// In this case, we'll also store experience buffers
final String experienceBufferFilename = createCheckpointFilename("ExperienceBuffer_P" + p, nextCheckpoint, "buf");
experienceBuffers[p].writeToFile(outParams.outDir.getAbsolutePath() + File.separator + experienceBufferFilename);
final String specialMoveExperienceBufferFilename = createCheckpointFilename("SpecialMoveExperienceBuffer_P" + p, nextCheckpoint, "buf");
specialMoveExperienceBuffers[p].writeToFile(outParams.outDir.getAbsolutePath() + File.separator + specialMoveExperienceBufferFilename);
// and optimisers
final String selectionOptimiserFilename = createCheckpointFilename("OptimiserSelection_P" + p, nextCheckpoint, "opt");
ceOptimisers[p].writeToFile(outParams.outDir.getAbsolutePath() + File.separator + selectionOptimiserFilename);
currentOptimiserSelectionFilenames[p] = selectionOptimiserFilename;
if (objectiveParams.trainTSPG)
{
final String tspgOptimiserFilename = createCheckpointFilename("OptimiserTSPG_P" + p, nextCheckpoint, "opt");
tspgOptimisers[p].writeToFile(outParams.outDir.getAbsolutePath() + File.separator + tspgOptimiserFilename);
currentOptimiserTSPGFilenames[p] = tspgOptimiserFilename;
}
// and average game duration trackers
final String gameDurationTrackerFilename = createCheckpointFilename("GameDurationTracker_P" + p, nextCheckpoint, "bin");
avgGameDurations[p].writeToFile(outParams.outDir.getAbsolutePath() + File.separator + gameDurationTrackerFilename);
currentGameDurationTrackerFilenames[p] = gameDurationTrackerFilename;
// and special moves CSV
final String specialMovesCSVFilename = createCheckpointFilename("SpecialMoves_P" + p, nextCheckpoint, "csv");
try
(
final PrintWriter writer = new PrintWriter(outParams.outDir.getAbsolutePath() + File.separator + specialMovesCSVFilename, "UTF-8")
)
{
// Write header
writer.println
(
StringRoutines.join(",", "SpatialFeatureIndex", "AlwaysWinning", "AlwaysLosing", "AlwaysAntiDefeating", "NumOccurrences")
);
for (int i = 0; i < featureSets[p].getNumSpatialFeatures(); ++i)
{
writer.println
(
StringRoutines.join
(
",",
String.valueOf(i),
winningMovesFeatures[p].get(i) ? "1" : "0",
losingMovesFeatures[p].get(i) ? "1" : "0",
antiDefeatingMovesFeatures[p].get(i) ? "1" : "0",
String.valueOf(featureOccurrences[p].getQuick(i))
)
);
}
}
catch (final IOException e)
{
e.printStackTrace();
}
}
}
if (forced)
{
final String valueOptimiserFilename = createCheckpointFilename("OptimiserValue", nextCheckpoint, "opt");
valueFunctionOptimiser.writeToFile(outParams.outDir.getAbsolutePath() + File.separator + valueOptimiserFilename);
currentOptimiserValueFilename = valueOptimiserFilename;
}
lastCheckpoint = nextCheckpoint;
}
//-----------------------------------------------------------------
@Override
public void logLine(final PrintWriter log, final String line)
{
if (!outParams.noLogging)
super.logLine(log, line);
}
//-----------------------------------------------------------------
};
}
//-------------------------------------------------------------------------
/**
* Creates a writer for output log, or null if we don't want one
* @return
*/
private PrintWriter createLogWriter()
{
if (outParams.outDir != null && !outParams.noLogging)
{
final String nextLogFilepath =
ExperimentFileUtils.getNextFilepath(outParams.outDir.getAbsolutePath() + File.separator + "ExIt", "log");
// first create directories if necessary
new File(nextLogFilepath).getParentFile().mkdirs();
try
{
return new PrintWriter(nextLogFilepath, "UTF-8");
}
catch (final FileNotFoundException | UnsupportedEncodingException e)
{
e.printStackTrace();
return null;
}
}
else
{
return null;
}
}
//-------------------------------------------------------------------------
/**
* Can be used for quick testing without command-line args, or proper
* testing with elaborate setup through command-line args
* @param args
*/
@SuppressWarnings("unchecked")
public static void main(final String[] args)
{
// Feature Set caching is safe in this main method
JITSPatterNetFeatureSet.ALLOW_FEATURE_SET_CACHE = true;
// Define options for arg parser
final CommandLineArgParse argParse =
new CommandLineArgParse
(
true,
"Execute a training run from self-play using Expert Iteration."
);
argParse.addOption(new ArgOption()
.withNames("--game")
.help("Name of the game to play. Should end with \".lud\".")
.withDefault("/Amazons.lud")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--game-options")
.help("Game Options to load.")
.withDefault(new ArrayList<String>(0))
.withNumVals("*")
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--ruleset")
.help("Ruleset to compile.")
.withDefault("")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--game-length-cap", "--max-num-actions")
.help("Maximum number of actions that may be taken before a game is terminated as a draw (-1 for no limit).")
.withDefault(Integer.valueOf(-1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--expert-ai")
.help("Type of AI to use as expert.")
.withDefault("Biased MCTS")
.withNumVals(1)
.withType(OptionTypes.String)
.withLegalVals("BEST_AGENT", "FROM_METADATA", "Biased MCTS", "UCT", "PVTS"));
argParse.addOption(new ArgOption()
.withNames("--best-agents-data-dir")
.help("Filepath for directory with best agents data for this game (+ options).")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--thinking-time", "--time", "--seconds")
.help("Max allowed thinking time per move (in seconds).")
.withDefault(Double.valueOf(1.0))
.withNumVals(1)
.withType(OptionTypes.Double));
argParse.addOption(new ArgOption()
.withNames("--iteration-limit", "--iterations")
.help("Max allowed number of MCTS iterations per move.")
.withDefault(Integer.valueOf(-1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--depth-limit")
.help("Search depth limit (e.g. for Alpha-Beta experts).")
.withDefault(Integer.valueOf(-1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--tournament-mode")
.help("If true, we use the tournament mode (similar to the one in Polygames).")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--playout-features-epsilon")
.help("Epsilon for epsilon greedy feature-based playouts")
.withDefault(Double.valueOf(0.0))
.withNumVals(1)
.withType(OptionTypes.Double));
argParse.addOption(new ArgOption()
.withNames("--max-biased-playout-actions", "--max-num-biased-playout-actions")
.help("Maximum number of actions per playout which we'll bias using features (-1 for no limit).")
.withDefault(Integer.valueOf(-1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--num-agent-threads")
.help("Number of threads to use for Tree Parallelisation in MCTS-based agents.")
.withDefault(Integer.valueOf(1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("-n", "--num-games", "--num-training-games")
.help("Number of training games to run.")
.withDefault(Integer.valueOf(200))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--batch-size")
.help("Max size of minibatches in training.")
.withDefault(Integer.valueOf(80))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--buffer-size", "--experience-buffer-size")
.help("Max size of the experience buffer.")
.withDefault(Integer.valueOf(2500))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--update-weights-every")
.help("After this many moves (decision points) in training games, we update weights.")
.withDefault(Integer.valueOf(1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--prioritized-experience-replay", "--per")
.help("If true, we'll use prioritized experience replay")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--init-value-func-dir")
.help("Directory from which to attempt extracting an initial value function.")
.withDefault("")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--num-policy-gradient-epochs")
.help("Number of epochs to run with policy gradients.")
//.withDefault(Integer.valueOf(100))
.withDefault(Integer.valueOf(0))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--num-trials-per-policy-gradient-epoch")
.help("Number of trials to run per epoch for policy gradients.")
.withDefault(Integer.valueOf(60))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--pg-gamma")
.help("Discount factor gamma for policy gradients (excluding TSPG).")
.withDefault(Double.valueOf(0.9))
.withNumVals(1)
.withType(OptionTypes.Double));
argParse.addOption(new ArgOption()
.withNames("--entropy-reg-weight")
.help("Weight for entropy regularization in policy gradients.")
.withDefault(Double.valueOf(0.1))
.withNumVals(1)
.withType(OptionTypes.Double));
argParse.addOption(new ArgOption()
.withNames("--num-policy-gradient-threads")
.help("Number of threads to use for parallel trials for policy gradients.")
.withDefault(Integer.valueOf(1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--post-pg-weight-scalar")
.help("After running policy gradients, scale weights by this value.")
.withDefault(Double.valueOf(0.01))
.withNumVals(1)
.withType(OptionTypes.Double));
argParse.addOption(new ArgOption()
.withNames("--add-feature-every")
.help("After this many training games, we add a new feature.")
.withDefault(Integer.valueOf(1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--no-grow-features", "--no-grow-featureset", "--no-grow-feature-set")
.help("If true, we'll not grow feature set (but still train weights).")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--combining-feature-instance-threshold")
.help("At most this number of feature instances will be taken into account when combining features.")
.withDefault(Integer.valueOf(60))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--num-feature-discovery-threads")
.help("Number of threads to use for parallel feature discovery.")
.withDefault(Integer.valueOf(1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--critical-value-corr-conf")
.help("Critical value used when computing confidence intervals for correlations ")
//.withDefault(Double.valueOf(1.64))
.withDefault(Double.valueOf(0.00))
.withNumVals(1)
.withType(OptionTypes.Double));
argParse.addOption(new ArgOption()
.withNames("--special-moves-expander")
.help("If true, we'll use a special-moves feature expander in addition to the normal one.")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--special-moves-expander-split")
.help("If true, we'll use a special-moves feature expander in addition to the normal one, splitting time with the normal one.")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--expander-type")
.help("Type of feature set expander to use.")
.withNumVals(1)
.withType(OptionTypes.String)
.withDefault("CorrelationBasedExpander")
.withLegalVals("CorrelationBasedExpander", "CorrelationErrorSignExpander", "Random"));
argParse.addOption(new ArgOption()
.withNames("--train-tspg")
.help("If true, we'll train a policy on TSPG objective (see COG paper).")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--is-episode-durations")
.help("If true, we'll use importance sampling weights based on episode durations for CE-loss.")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--wis", "--weighted-importance-sampling")
.help("If true, we use Weighted Importance Sampling instead of Ordinary Importance Sampling for any of the above")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--no-value-learning")
.help("If true, we don't do any value function learning.")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--handle-aliasing")
.help("If true, we handle move aliasing by putting the maximum mass among all aliased moves on each of them, for training selection policy.")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--handle-aliasing-playouts")
.help("If true, we handle move aliasing by putting the maximum mass among all aliased moves on each of them, for training playout policy.")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--weight-decay-lambda")
.help("Lambda param for weight decay")
.withDefault(Double.valueOf(0.000001))
.withNumVals(1)
.withType(OptionTypes.Double));
argParse.addOption(new ArgOption()
.withNames("--selection-optimiser")
.help("Optimiser to use for policy trained for Selection phase.")
.withDefault("RMSProp")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--playout-optimiser")
.help("Optimiser to use for policy trained for Playouts.")
.withDefault("RMSProp")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--cee-optimiser", "--cross-entropy-exploration-optimiser")
.help("Optimiser to use for training Cross-Entropy Exploration policy.")
.withDefault("RMSProp")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--tspg-optimiser")
.help("Optimiser to use for policy trained on TSPG objective (see COG paper).")
.withDefault("RMSProp")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--value-optimiser")
.help("Optimiser to use for value function optimisation.")
.withDefault("RMSProp")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--checkpoint-type", "--checkpoints")
.help("When do we store checkpoints of trained weights?")
.withDefault(CheckpointTypes.Game.toString())
.withNumVals(1)
.withType(OptionTypes.String)
.withLegalVals(Arrays.stream(CheckpointTypes.values()).map(Object::toString).toArray()));
argParse.addOption(new ArgOption()
.withNames("--checkpoint-freq", "--checkpoint-frequency")
.help("Frequency of checkpoint updates")
.withDefault(Integer.valueOf(1))
.withNumVals(1)
.withType(OptionTypes.Int));
argParse.addOption(new ArgOption()
.withNames("--out-dir", "--output-directory")
.help("Filepath for output directory")
.withNumVals(1)
.withType(OptionTypes.String));
argParse.addOption(new ArgOption()
.withNames("--no-logging")
.help("If true, we don't write a bunch of messages to a log file.")
.withType(OptionTypes.Boolean));
argParse.addOption(new ArgOption()
.withNames("--useGUI")
.help("Whether to create a small GUI that can be used to "
+ "manually interrupt training run. False by default."));
argParse.addOption(new ArgOption()
.withNames("--max-wall-time")
.help("Max wall time in minutes (or -1 for no limit).")
.withDefault(Integer.valueOf(-1))
.withNumVals(1)
.withType(OptionTypes.Int));
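// Example invocation (option names as defined above; game name, values and output path are illustrative only):
// ExpertIteration --game Amazons.lud --expert-ai "Biased MCTS" -n 200 --thinking-time 1.0 --out-dir /path/to/out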
// parse the args
if (!argParse.parseArguments(args))
return;
// use the parsed args
final ExpertIteration exIt =
new ExpertIteration
(
argParse.getValueBool("--useGUI"),
argParse.getValueInt("--max-wall-time")
);
exIt.gameParams.gameName = argParse.getValueString("--game");
exIt.gameParams.gameOptions = (List<String>) argParse.getValue("--game-options");
exIt.gameParams.ruleset = argParse.getValueString("--ruleset");
exIt.gameParams.gameLengthCap = argParse.getValueInt("--game-length-cap");
exIt.agentsParams.expertAI = argParse.getValueString("--expert-ai");
exIt.agentsParams.bestAgentsDataDir = argParse.getValueString("--best-agents-data-dir");
exIt.agentsParams.thinkingTime = argParse.getValueDouble("--thinking-time");
exIt.agentsParams.iterationLimit = argParse.getValueInt("--iteration-limit");
exIt.agentsParams.depthLimit = argParse.getValueInt("--depth-limit");
exIt.agentsParams.tournamentMode = argParse.getValueBool("--tournament-mode");
exIt.agentsParams.playoutFeaturesEpsilon = argParse.getValueDouble("--playout-features-epsilon");
exIt.agentsParams.maxNumBiasedPlayoutActions = argParse.getValueInt("--max-num-biased-playout-actions");
exIt.agentsParams.numAgentThreads = argParse.getValueInt("--num-agent-threads");
exIt.trainingParams.numTrainingGames = argParse.getValueInt("-n");
exIt.trainingParams.batchSize = argParse.getValueInt("--batch-size");
exIt.trainingParams.experienceBufferSize = argParse.getValueInt("--buffer-size");
exIt.trainingParams.updateWeightsEvery = argParse.getValueInt("--update-weights-every");
exIt.trainingParams.prioritizedExperienceReplay = argParse.getValueBool("--prioritized-experience-replay");
exIt.trainingParams.initValueFuncDir = argParse.getValueString("--init-value-func-dir");
exIt.trainingParams.numPolicyGradientEpochs = argParse.getValueInt("--num-policy-gradient-epochs");
exIt.trainingParams.numTrialsPerPolicyGradientEpoch = argParse.getValueInt("--num-trials-per-policy-gradient-epoch");
exIt.trainingParams.pgGamma = argParse.getValueDouble("--pg-gamma");
exIt.trainingParams.entropyRegWeight = argParse.getValueDouble("--entropy-reg-weight");
exIt.trainingParams.numPolicyGradientThreads = argParse.getValueInt("--num-policy-gradient-threads");
exIt.trainingParams.postPGWeightScalar = argParse.getValueDouble("--post-pg-weight-scalar");
exIt.featureDiscoveryParams.addFeatureEvery = argParse.getValueInt("--add-feature-every");
exIt.featureDiscoveryParams.noGrowFeatureSet = argParse.getValueBool("--no-grow-features");
exIt.featureDiscoveryParams.combiningFeatureInstanceThreshold = argParse.getValueInt("--combining-feature-instance-threshold");
exIt.featureDiscoveryParams.numFeatureDiscoveryThreads = argParse.getValueInt("--num-feature-discovery-threads");
exIt.featureDiscoveryParams.criticalValueCorrConf = argParse.getValueDouble("--critical-value-corr-conf");
exIt.featureDiscoveryParams.useSpecialMovesExpander = argParse.getValueBool("--special-moves-expander");
exIt.featureDiscoveryParams.useSpecialMovesExpanderSplit = argParse.getValueBool("--special-moves-expander-split");
exIt.featureDiscoveryParams.expanderType = argParse.getValueString("--expander-type");
exIt.objectiveParams.trainTSPG = argParse.getValueBool("--train-tspg");
exIt.objectiveParams.importanceSamplingEpisodeDurations = argParse.getValueBool("--is-episode-durations");
exIt.objectiveParams.weightedImportanceSampling = argParse.getValueBool("--wis");
exIt.objectiveParams.noValueLearning = argParse.getValueBool("--no-value-learning");
exIt.objectiveParams.handleAliasing = argParse.getValueBool("--handle-aliasing");
exIt.objectiveParams.handleAliasingPlayouts = argParse.getValueBool("--handle-aliasing-playouts");
exIt.objectiveParams.weightDecayLambda = argParse.getValueDouble("--weight-decay-lambda");
exIt.optimisersParams.selectionOptimiserConfig = argParse.getValueString("--selection-optimiser");
exIt.optimisersParams.playoutOptimiserConfig = argParse.getValueString("--playout-optimiser");
exIt.optimisersParams.ceExploreOptimiserConfig = argParse.getValueString("--cee-optimiser");
exIt.optimisersParams.tspgOptimiserConfig = argParse.getValueString("--tspg-optimiser");
exIt.optimisersParams.valueOptimiserConfig = argParse.getValueString("--value-optimiser");
exIt.outParams.checkpointType = CheckpointTypes.valueOf(argParse.getValueString("--checkpoint-type"));
exIt.outParams.checkpointFrequency = argParse.getValueInt("--checkpoint-freq");
final String outDirFilepath = argParse.getValueString("--out-dir");
if (outDirFilepath != null)
exIt.outParams.outDir = new File(outDirFilepath);
else
exIt.outParams.outDir = null;
exIt.outParams.noLogging = argParse.getValueBool("--no-logging");
exIt.startExperiment();
}
}
| 104,600 | 34.871399 | 149 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/ExpertPolicy.java | package training.expert_iteration;
import java.util.List;
import main.collections.FVector;
import main.collections.FastArrayList;
import other.AI;
import other.move.Move;
/**
* Abstract class for policies that can serve as experts in Expert Iteration
*
* @author Dennis Soemers
*/
public abstract class ExpertPolicy extends AI
{
/**
* @return Should return a list of the moves considered at the
* "root" state during the last search executed by this expert.
*/
public abstract FastArrayList<Move> lastSearchRootMoves();
/**
* @param tau Temperature parameter that may or may not be used
* by some experts. For MCTS, tau = 1.0 means proportional to
* visit counts, whereas tau --> 0.0 means greedy with respect
* to visit counts.
* @return Policy / distribution over actions as computed by expert
*/
public abstract FVector computeExpertPolicy(final double tau);
/**
* @return A list of samples of experience for Expert Iteration, based on
* the last search executed by this expert.
*/
public abstract List<ExItExperience> generateExItExperiences();
}
| 1,102 | 26.575 | 76 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/gradients/Gradients.java | package training.expert_iteration.gradients;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import features.FeatureVector;
import gnu.trove.list.array.TIntArrayList;
import main.collections.FVector;
import metadata.ai.heuristics.Heuristics;
import optimisers.Optimiser;
import policies.softmax.SoftmaxPolicyLinear;
import training.expert_iteration.ExItExperience;
/**
* Class with helper methods to compute gradients for self-play training
* (and related stuff, like errors/losses)
*
* @author Dennis Soemers
*/
public class Gradients
{
//-------------------------------------------------------------------------
/**
* Don't need a constructor for this class
*/
private Gradients()
{
// Do nothing;
}
//-------------------------------------------------------------------------
/**
* @param estimatedDistribution
* @param targetDistribution
* @return Vector of errors for estimated distribution in comparison to
* target distribution (simply estimated - target)
*/
public static FVector computeDistributionErrors
(
final FVector estimatedDistribution,
final FVector targetDistribution
)
{
final FVector errors = estimatedDistribution.copy();
errors.subtract(targetDistribution);
return errors;
}
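/**
* Computes cross-entropy gradients for a linear softmax policy with respect to
* an expert distribution over the same moves. If handleAliasing is true, moves
* with identical feature vectors ("aliased" moves) all receive the maximum expert
* probability among them, and the expert distribution is renormalised, before
* the errors are computed.
*
* @param policy
* @param expertDistribution
* @param featureVectors
* @param p Player for whom to compute the distribution
* @param handleAliasing
* @return Vector of errors (estimated apprentice probabilities minus expert probabilities)
*/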
public static FVector computeCrossEntropyErrors
(
final SoftmaxPolicyLinear policy,
final FVector expertDistribution,
final FeatureVector[] featureVectors,
final int p,
final boolean handleAliasing
)
{
final FVector apprenticePolicy = policy.computeDistribution(featureVectors, p);
final FVector expertPolicy;
if (handleAliasing)
{
// Need to handle aliased moves
final Map<FeatureVector, TIntArrayList> movesPerFeatureVector =
new HashMap<FeatureVector, TIntArrayList>();
for (int moveIdx = 0; moveIdx < featureVectors.length; ++moveIdx)
{
final FeatureVector featureVector = featureVectors[moveIdx];
if (!movesPerFeatureVector.containsKey(featureVector))
movesPerFeatureVector.put(featureVector, new TIntArrayList());
movesPerFeatureVector.get(featureVector).add(moveIdx);
}
expertPolicy = expertDistribution.copy(); // Don't want to permanently modify the original
final boolean[] alreadyUpdatedValue = new boolean[expertPolicy.dim()];
for (int moveIdx = 0; moveIdx < expertPolicy.dim(); ++moveIdx)
{
if (alreadyUpdatedValue[moveIdx])
continue;
final TIntArrayList aliasedMoves = movesPerFeatureVector.get(featureVectors[moveIdx]);
if (aliasedMoves.size() > 1)
{
//System.out.println(aliasedMoves.size() + " aliased moves");
float maxVal = 0.f;
for (int i = 0; i < aliasedMoves.size(); ++i)
{
final float val = expertPolicy.get(aliasedMoves.getQuick(i));
if (val > maxVal)
maxVal = val;
}
// Set all aliased moves to the max probability
for (int i = 0; i < aliasedMoves.size(); ++i)
{
expertPolicy.set(aliasedMoves.getQuick(i), maxVal);
alreadyUpdatedValue[aliasedMoves.getQuick(i)] = true;
}
}
}
// Renormalise the expert policy
expertPolicy.normalise();
// System.out.println("---------------------------------------------------");
// for (final Entry<FeatureVector, TIntArrayList> entry : movesPerFeatureVector.entrySet())
// {
// if (entry.getValue().size() > 1)
// {
// final FVector origErrors = cePolicy.computeDistributionErrors(apprenticePolicy, sample.expertDistribution());
// final FVector modifiedErrors = cePolicy.computeDistributionErrors(apprenticePolicy, expertPolicy);
// System.out.print("Orig errors for repeated feature vector: ");
// for (int moveIdx = 0; moveIdx < entry.getValue().size(); ++moveIdx)
// {
// System.out.print(origErrors.get(entry.getValue().getQuick(moveIdx)) + ", ");
// }
// System.out.println();
// System.out.print("Modified errors for repeated feature vector: ");
// for (int moveIdx = 0; moveIdx < entry.getValue().size(); ++moveIdx)
// {
// System.out.print(modifiedErrors.get(entry.getValue().getQuick(moveIdx)) + ", ");
// }
// System.out.println();
// }
// }
// System.out.println("---------------------------------------------------");
}
else
{
expertPolicy = expertDistribution;
}
return computeDistributionErrors(apprenticePolicy, expertPolicy);
}
/**
* @param valueFunction
* @param p
* @param sample
* @return Vector of value function gradients, or null if value function is null or player is invalid.
*/
public static FVector computeValueGradients(final Heuristics valueFunction, final int p, final ExItExperience sample)
{
if (valueFunction != null && p > 0)
{
// Compute gradients for value function
final FVector valueFunctionParams = valueFunction.paramsVector();
final float predictedValue = (float) Math.tanh(valueFunctionParams.dot(sample.stateFeatureVector()));
final float gameOutcome = (float) sample.playerOutcomes()[sample.state().state().mover()];
final float valueError = predictedValue - gameOutcome;
final FVector valueGradients = new FVector(valueFunctionParams.dim());
// Need to multiply this by feature value to compute gradient per feature
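// Chain rule for the squared error through tanh:
// d/dw_i (tanh(w.x) - z)^2 = 2 * (tanh(w.x) - z) * (1 - tanh(w.x)^2) * x_i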
final float gradDivFeature = 2.f * valueError * (1.f - predictedValue*predictedValue);
for (int i = 0; i < valueGradients.dim(); ++i)
{
valueGradients.set(i, gradDivFeature * sample.stateFeatureVector().get(i));
}
// System.out.println();
// System.out.println("State Features = " + sample.stateFeatureVector());
// System.out.println("pred. value = " + predictedValue);
// System.out.println("observed outcome = " + gameOutcome);
// System.out.println("value error = " + valueError);
// System.out.println("value grads = " + valueGradients);
// System.out.println();
return valueGradients;
}
return null;
}
//-------------------------------------------------------------------------
/**
* @param gradientVectors
* @return Mean vector of gradients, or null if there are no vectors of gradients.
*/
public static FVector meanGradients(final List<FVector> gradientVectors)
{
if (!gradientVectors.isEmpty())
return FVector.mean(gradientVectors);
return null;
}
/**
* @param gradientVectors
* @param sumImportanceSamplingWeights
* @return A single vector of gradients computed using Weighted Importance Sampling, rather
* than by taking directly the mean of the given list of vectors, or null if there are no
* vectors of gradients.
*/
public static FVector wisGradients
(
final List<FVector> gradientVectors, final float sumImportanceSamplingWeights
)
{
if (gradientVectors.isEmpty())
return null;
final FVector wisGradients = gradientVectors.get(0).copy();
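// Sum the per-sample gradients, then divide by the sum of importance sampling
// weights (rather than by the number of samples, as ordinary importance sampling would)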
for (int i = 1; i < gradientVectors.size(); ++i)
{
wisGradients.add(gradientVectors.get(i));
}
if (sumImportanceSamplingWeights > 0.0)
wisGradients.div(sumImportanceSamplingWeights);
return wisGradients;
}
//-------------------------------------------------------------------------
/**
* Runs a gradient descent step + weight decay to minimise some loss for
* which the gradients are provided.
*
* @param optimiser
* @param params
* @param gradients
* @param weightDecayLambda
*/
public static void minimise
(
final Optimiser optimiser,
final FVector params,
final FVector gradients,
final float weightDecayLambda
)
{
final FVector weightDecayVector = new FVector(params);
weightDecayVector.mult(weightDecayLambda);
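// Decoupled weight decay: take the optimiser step first, then subtract
// weightDecayLambda * (pre-step params) from the parameters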
optimiser.minimiseObjective(params, gradients);
params.subtract(weightDecayVector);
}
/**
* Runs a gradient ascent step + weight decay to maximise some objective for
* which the gradients are provided.
*
* @param optimiser
* @param params
* @param gradients
* @param weightDecayLambda
*/
public static void maximise
(
final Optimiser optimiser,
final FVector params,
final FVector gradients,
final float weightDecayLambda
)
{
final FVector weightDecayVector = new FVector(params);
weightDecayVector.mult(weightDecayLambda);
optimiser.maximiseObjective(params, gradients);
params.subtract(weightDecayVector);
}
//-------------------------------------------------------------------------
}
| 8,322 | 29.599265 | 118 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/menageries/AgentCheckpoint.java | package training.expert_iteration.menageries;
import java.io.IOException;
import game.Game;
import main.FileHandling;
import main.grammar.Report;
import metadata.ai.agents.BestAgent;
import metadata.ai.features.Features;
import metadata.ai.heuristics.Heuristics;
import policies.softmax.SoftmaxPolicyLinear;
import search.mcts.MCTS;
import search.mcts.backpropagation.AlphaGoBackprop;
import search.mcts.finalmoveselection.RobustChild;
import search.mcts.playout.RandomPlayout;
import search.mcts.selection.NoisyAG0Selection;
import search.minimax.AlphaBetaSearch;
import training.expert_iteration.ExpertPolicy;
import training.expert_iteration.params.AgentsParams;
import utils.AIFactory;
/**
* A checkpoint containing all the data required to reproduce a version of an
* agent at some point in a training process.
*
* @author Dennis Soemers
*/
public class AgentCheckpoint
{
//-------------------------------------------------------------------------
/** Type of agent */
protected final String agentName;
/** Descriptor of this agent in population */
protected final String checkpointName;
/** Features metadata (can be null if this checkpoint doesn't use features) */
protected final Features featuresMetadata;
/** Heuristics metadata (can be null if this checkpoint doesn't use heuristics) */
protected final Heuristics heuristicsMetadata;
//-------------------------------------------------------------------------
/**
* Constructor
* @param agentName
* @param checkpointName
* @param featuresMetadata
* @param heuristicsMetadata
*/
public AgentCheckpoint
(
final String agentName,
final String checkpointName,
final Features featuresMetadata,
final Heuristics heuristicsMetadata
)
{
this.agentName = agentName;
this.checkpointName = checkpointName;
this.featuresMetadata = featuresMetadata;
this.heuristicsMetadata = heuristicsMetadata;
}
//-------------------------------------------------------------------------
/**
* @param game
* @param agentsParams
* @return An agent generated based on this checkpoint
*/
public ExpertPolicy generateAgent(final Game game, final AgentsParams agentsParams)
{
final ExpertPolicy ai;
if (agentName.equals("BEST_AGENT"))
{
try
{
final BestAgent bestAgent = (BestAgent)compiler.Compiler.compileObject
(
FileHandling.loadTextContentsFromFile(agentsParams.bestAgentsDataDir + "/BestAgent.txt"),
"metadata.ai.agents.BestAgent",
new Report()
);
if (bestAgent.agent().equals("AlphaBeta") || bestAgent.agent().equals("Alpha-Beta"))
{
ai = new AlphaBetaSearch(agentsParams.bestAgentsDataDir + "/BestHeuristics.txt");
}
else if (bestAgent.agent().equals("AlphaBetaMetadata"))
{
ai = new AlphaBetaSearch();
}
else if (bestAgent.agent().equals("UCT"))
{
ai = (ExpertPolicy) AIFactory.createAI("UCT");
}
else if (bestAgent.agent().equals("MC-GRAVE"))
{
ai = (ExpertPolicy) AIFactory.createAI("MC-GRAVE");
}
else if (bestAgent.agent().equals("MC-BRAVE"))
{
ai = (ExpertPolicy) AIFactory.createAI("MC-BRAVE");
}
else if (bestAgent.agent().equals("Biased MCTS"))
{
final Features features = (Features)compiler.Compiler.compileObject
(
FileHandling.loadTextContentsFromFile(agentsParams.bestAgentsDataDir + "/BestFeatures.txt"),
"metadata.ai.features.Features",
new Report()
);
// TODO compare features string to features string in training process, use that if same?
ai = MCTS.createBiasedMCTS(features, agentsParams.playoutFeaturesEpsilon);
}
else if (bestAgent.agent().equals("Biased MCTS (Uniform Playouts)"))
{
final Features features = (Features)compiler.Compiler.compileObject
(
FileHandling.loadTextContentsFromFile(agentsParams.bestAgentsDataDir + "/BestFeatures.txt"),
"metadata.ai.features.Features",
new Report()
);
ai = MCTS.createBiasedMCTS(features, 1.0);
}
else if (bestAgent.agent().equals("Random"))
{
// Don't want to train with Random, so we'll take UCT instead
ai = MCTS.createUCT();
}
else
{
System.err.println("Unrecognised best agent: " + bestAgent.agent());
return null;
}
}
catch (final IOException e)
{
e.printStackTrace();
return null;
}
}
else if (agentName.equals("FROM_METADATA"))
{
ai = (ExpertPolicy) AIFactory.fromMetadata(game);
if (ai == null)
{
System.err.println("AI from metadata is null!");
return null;
}
}
else if (agentName.equals("Biased MCTS"))
{
ai = MCTS.createBiasedMCTS(featuresMetadata, agentsParams.playoutFeaturesEpsilon);
ai.setFriendlyName("Biased MCTS");
}
else if (agentName.equals("PVTS"))
{
final MCTS mcts =
new MCTS
(
new NoisyAG0Selection(),
new RandomPlayout(0),
new AlphaGoBackprop(),
new RobustChild()
);
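// Zero-length playouts combined with a playout value weight of 0.0 below mean
// node evaluations come entirely from the heuristic value function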
mcts.setLearnedSelectionPolicy(SoftmaxPolicyLinear.constructSelectionPolicy(featuresMetadata, 0.0));
mcts.setPlayoutValueWeight(0.0);
mcts.setWantsMetadataHeuristics(false);
mcts.setHeuristics(heuristicsMetadata);
mcts.setFriendlyName("PVTS");
ai = mcts;
}
else if (agentName.equals("UCT"))
{
ai = MCTS.createUCT();
}
else if (agentName.equals("MC-GRAVE"))
{
ai = (ExpertPolicy) AIFactory.createAI("MC-GRAVE");
}
else if (agentName.equals("MC-BRAVE"))
{
ai = (ExpertPolicy) AIFactory.createAI("MC-BRAVE");
}
else
{
System.err.println("Cannot recognise expert AI: " + agentsParams.expertAI);
return null;
}
if (ai instanceof MCTS)
{
// Need to preserve root node such that we can extract distributions from it
((MCTS) ai).setPreserveRootNode(true);
}
return ai;
}
//-------------------------------------------------------------------------
/**
* @return Descriptor of this agent in the population
*/
public String checkpointName()
{
return checkpointName;
}
//-------------------------------------------------------------------------
}
| 6,133 | 26.506726 | 103 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/menageries/Menagerie.java | package training.expert_iteration.menageries;
import java.util.List;
import game.Game;
import metadata.ai.features.Features;
import metadata.ai.heuristics.Heuristics;
import other.context.Context;
import training.expert_iteration.ExpertPolicy;
import training.expert_iteration.params.AgentsParams;
/**
* Interface for "menageries": objects that can tell us which agents to use
* in self-play games of a self-play training process. The term "menagerie" is
* inspired by "A Generalized Framework for Self-Play Training" by Hernandez et al., 2019.
* However, in our case the menagerie actually also fulfills the roles of the
* policy sampling distribution and the curator.
*
* @author Dennis Soemers
*/
public interface Menagerie
{
//-------------------------------------------------------------------------
/**
* @param game
* @param agentsParams
* @return List of agents to use in a single self-play game
*/
public DrawnAgentsData drawAgents(final Game game, final AgentsParams agentsParams);
//-------------------------------------------------------------------------
/**
* Initialise our population of checkpoints (+ dev)
*
* @param game
* @param agentsParams
* @param features
* @param heuristics
*/
public void initialisePopulation
(
final Game game,
final AgentsParams agentsParams,
final Features features,
final Heuristics heuristics
);
//-------------------------------------------------------------------------
/**
* Update the dev checkpoint's features
* @param features
*/
public void updateDevFeatures(final Features features);
/**
* Update the dev checkpoint's heuristics
* @param heuristics
*/
public void updateDevHeuristics(final Heuristics heuristics);
/**
* Update the menagerie based on an outcome of a self-play trial
* @param context
* @param drawnAgentsData
*/
public void updateOutcome(final Context context, final DrawnAgentsData drawnAgentsData);
//-------------------------------------------------------------------------
/**
* @return String describing the menagerie's data, for log (or null if nothing to log)
*/
public String generateLog();
//-------------------------------------------------------------------------
/**
* Data describing a collection of agents that has been drawn. Specific
* implementations of Menageries may use subclasses of this to include
* additional data that they might need.
*
* @author Dennis Soemers
*/
public static class DrawnAgentsData
{
/** List of experts */
protected final List<ExpertPolicy> agents;
/**
* Constructor
* @param agents
*/
public DrawnAgentsData(final List<ExpertPolicy> agents)
{
this.agents = agents;
}
/**
* @return List of expert agents
*/
public List<ExpertPolicy> getAgents()
{
return agents;
}
}
//-------------------------------------------------------------------------
}
| 2,942 | 24.37069 | 90 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/menageries/NaiveSelfPlay.java | package training.expert_iteration.menageries;
import java.util.ArrayList;
import java.util.List;
import game.Game;
import metadata.ai.features.Features;
import metadata.ai.heuristics.Heuristics;
import other.context.Context;
import training.expert_iteration.ExpertPolicy;
import training.expert_iteration.params.AgentsParams;
/**
* Naive self-play menagerie: always uses the latest version of the trained agent,
* for all player IDs.
*
* @author Dennis Soemers
*/
public class NaiveSelfPlay implements Menagerie
{
//-------------------------------------------------------------------------
/** Our dev checkpoint (the only one we actually use) */
private AgentCheckpoint dev;
//-------------------------------------------------------------------------
@Override
public DrawnAgentsData drawAgents(final Game game, final AgentsParams agentsParams)
{
final List<ExpertPolicy> agents = new ArrayList<ExpertPolicy>(game.players().count() + 1);
agents.add(null);
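// Index 0 is left null because player IDs in Ludii games are 1-based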
for (int p = 1; p <= game.players().count(); ++p)
{
agents.add(dev.generateAgent(game, agentsParams));
}
return new DrawnAgentsData(agents);
}
//-------------------------------------------------------------------------
@Override
public void initialisePopulation
(
final Game game,
final AgentsParams agentsParams,
final Features features,
final Heuristics heuristics
)
{
dev = new AgentCheckpoint(agentsParams.expertAI, "Dev", features, heuristics);
}
@Override
public void updateDevFeatures(final Features features)
{
dev = new AgentCheckpoint(dev.agentName, "Dev", features, dev.heuristicsMetadata);
}
@Override
public void updateDevHeuristics(final Heuristics heuristics)
{
dev = new AgentCheckpoint(dev.agentName, "Dev", dev.featuresMetadata, heuristics);
}
@Override
public void updateOutcome(final Context context, final DrawnAgentsData drawnAgentsData)
{
// Nothing to do here
}
@Override
public String generateLog()
{
return null;
}
//-------------------------------------------------------------------------
}
| 2,073 | 24.292683 | 92 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/menageries/TournamentMenagerie.java | package training.expert_iteration.menageries;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import game.Game;
import gnu.trove.list.array.TFloatArrayList;
import gnu.trove.list.array.TIntArrayList;
import main.collections.FVector;
import metadata.ai.features.Features;
import metadata.ai.heuristics.Heuristics;
import other.RankUtils;
import other.context.Context;
import training.expert_iteration.ExpertPolicy;
import training.expert_iteration.params.AgentsParams;
/**
* Menagerie for Elo-based tournament mode (like in Polygames)
*
* @author Dennis Soemers
*/
public class TournamentMenagerie implements Menagerie
{
//-------------------------------------------------------------------------
/** Our dev checkpoint */
private AgentCheckpoint dev;
/** Population of checkpoints */
private final List<AgentCheckpoint> population = new ArrayList<AgentCheckpoint>();
/** Indexed by index of population list; Elo rating (one for every agent in population) */
private TFloatArrayList populationElosTable;
/** Elo rating for dev */
private float devElo = 0.f;
/** First indexed by Player ID (in game), secondly indexed by index of population list */
private TIntArrayList[] agentPickCounts;
/** How many checkpoints do we have? */
private int checkpointCounter = 0;
/** Do we have to add our new dev to the population? */
private boolean shouldAddDev = false;
//-------------------------------------------------------------------------
@Override
public TournamentDrawnAgentsData drawAgents(final Game game, final AgentsParams agentsParams)
{
if (shouldAddDev)
{
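// A freshly updated dev checkpoint is only added to the population lazily, the
// first time agents are drawn after the update; it starts from the current dev Elo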
population.add(dev);
shouldAddDev = false;
// Initialise Elo rating for new checkpoint
if (checkpointCounter > 0)
populationElosTable.add(devElo);
else
populationElosTable.add(0.f);
for (int p = 1; p < agentPickCounts.length; ++p)
{
agentPickCounts[p].add(0);
}
++checkpointCounter;
}
final int[] agentIndices = new int[agentPickCounts.length];
agentIndices[0] = -1;
final List<ExpertPolicy> agents = new ArrayList<ExpertPolicy>(agentPickCounts.length);
agents.add(null);
// We will always use dev for at least one of the players
final int devIndex = ThreadLocalRandom.current().nextInt(1, agentPickCounts.length);
for (int p = 1; p < agentPickCounts.length; ++p)
{
if (p == devIndex)
{
agents.add(dev.generateAgent(game, agentsParams));
agentIndices[p] = -1;
}
else
{
// Compute vector of probabilities for this player for all checkpoints based on Elo ratings
final FVector probs = new FVector(populationElosTable);
final float max = probs.max();
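// Boltzmann distribution over Elo ratings (scale 400); subtracting the max
// before exponentiating avoids overflow without changing the distribution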
for (int i = 0; i < probs.dim(); ++i)
{
probs.set(i, (float) Math.exp((probs.get(i) - max) / 400.f));
}
final int sampledAgentIdx = probs.sampleProportionally();
agents.add(population.get(sampledAgentIdx).generateAgent(game, agentsParams));
agentIndices[p] = sampledAgentIdx;
agentPickCounts[p].setQuick(sampledAgentIdx, agentPickCounts[p].getQuick(sampledAgentIdx) + 1);
}
}
return new TournamentDrawnAgentsData(agents, devIndex, agentIndices);
}
//-------------------------------------------------------------------------
@Override
public void initialisePopulation
(
final Game game,
final AgentsParams agentsParams,
final Features features,
final Heuristics heuristics
)
{
dev = new AgentCheckpoint(agentsParams.expertAI, "Checkpoint " + checkpointCounter, features, heuristics);
devElo = 0.f;
population.clear();
shouldAddDev = true;
final int numPlayers = game.players().count();
populationElosTable = new TFloatArrayList();
agentPickCounts = new TIntArrayList[numPlayers + 1];
for (int p = 1; p <= numPlayers; ++p)
{
agentPickCounts[p] = new TIntArrayList();
}
// Start out with plain UCT and MC-GRAVE agents
for (final String startingAgent : new String[]{"UCT", "MC-GRAVE"})
{
population.add(new AgentCheckpoint(startingAgent, startingAgent, null, null));
populationElosTable.add(0.f);
for (int p = 1; p <= numPlayers; ++p)
{
agentPickCounts[p].add(0);
}
}
}
@Override
public void updateDevFeatures(final Features features)
{
dev = new AgentCheckpoint(dev.agentName, "Checkpoint " + checkpointCounter, features, dev.heuristicsMetadata);
shouldAddDev = true;
}
@Override
public void updateDevHeuristics(final Heuristics heuristics)
{
dev = new AgentCheckpoint(dev.agentName, "Checkpoint " + checkpointCounter, dev.featuresMetadata, heuristics);
shouldAddDev = true;
}
@Override
public void updateOutcome(final Context context, final DrawnAgentsData drawnAgentsData)
{
final TournamentDrawnAgentsData d = (TournamentDrawnAgentsData) drawnAgentsData;
final double[] utilities = RankUtils.agentUtilities(context);
float sumElos = 0.f;
for (int p = 1; p < agentPickCounts.length; ++p)
{
if (p == d.devIdx())
sumElos += devElo;
else
sumElos += populationElosTable.getQuick(d.agentIndices()[p]);
}
// Compute by how much to adjust all the Elo ratings
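// Elo-style update: each rating moves by K * (achieved utility - expected utility),
// with K = 15 and the expected utility derived from the logistic Elo curve against
// the average rating of the opponents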
final float[] elosToAdd = new float[agentPickCounts.length];
for (int p = 1; p < agentPickCounts.length; ++p)
{
final double pUtility = utilities[p];
final float pElo;
if (p == d.devIdx())
pElo = devElo;
else
pElo = populationElosTable.getQuick(d.agentIndices()[p]);
final float avgOpponentsElo = (sumElos - pElo) / (agentPickCounts.length - 1);
final double expectedWinProb = 1.0 / (1.0 + Math.pow(10.0, (avgOpponentsElo - pElo) / 400.0));
final double expectedUtil = 2.0 * expectedWinProb - 1.0;
elosToAdd[p] += 15 * (pUtility - expectedUtil);
}
// Do the actual Elo updates
for (int p = 1; p < agentPickCounts.length; ++p)
{
if (p == d.devIdx())
devElo += elosToAdd[p];
else
populationElosTable.setQuick(d.agentIndices()[p], populationElosTable.getQuick(d.agentIndices()[p]) + elosToAdd[p]);
}
}
//-------------------------------------------------------------------------
@Override
public String generateLog()
{
final StringBuilder sb = new StringBuilder();
sb.append("\nDev Elo: " + devElo + "\n");
sb.append("Checkpoint Elos:\n");
for (int i = 0; i < population.size(); ++i)
{
sb.append(population.get(i).checkpointName() + ": " + populationElosTable.getQuick(i) + "\n");
}
sb.append("\n");
sb.append("Checkpoint Pick Counts:\n");
for (int i = 0; i < population.size(); ++i)
{
sb.append(population.get(i).checkpointName() + ": ");
for (int p = 1; p < agentPickCounts.length; ++p)
{
sb.append(agentPickCounts[p].getQuick(i));
if (p + 1 < agentPickCounts.length)
sb.append(", ");
}
sb.append("\n");
}
sb.append("\n");
return sb.toString();
}
//-------------------------------------------------------------------------
/**
* Subclass of DrawnAgentsData; additionally remembers indexes of agents
* that were drawn, such that we can correctly update Elo ratings when
* trial is done.
*
* @author Dennis Soemers
*/
public static class TournamentDrawnAgentsData extends DrawnAgentsData
{
/** Player index for which we picked the dev checkpoint */
private final int devIdx;
/** For every player ID (except devIdx), the index of the checkpoint we used there */
private final int[] agentIndices;
/**
* Constructor
* @param agents
* @param devIdx
* @param agentIndices
*/
public TournamentDrawnAgentsData(final List<ExpertPolicy> agents, final int devIdx, final int[] agentIndices)
{
super(agents);
this.devIdx = devIdx;
this.agentIndices = agentIndices;
}
/**
* @return For every player ID (except devIdx), the index of the checkpoint we used there
*/
public int[] agentIndices()
{
return agentIndices;
}
/**
* @return Player index for which we picked the dev checkpoint
*/
public int devIdx()
{
return devIdx;
}
}
//-------------------------------------------------------------------------
}
| 8,125 | 26.828767 | 120 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/params/AgentsParams.java | package training.expert_iteration.params;
/**
* Wrapper around params for agents setup/configuration in training runs.
*
* @author Dennis Soemers
*/
public class AgentsParams
{
//-------------------------------------------------------------------------
/** Type of AI to use as expert */
public String expertAI;
/** Filepath for best agents data directory for this specific game (+ options) */
public String bestAgentsDataDir;
/** Max allowed thinking time per move (in seconds) */
public double thinkingTime;
/** Max allowed number of MCTS iterations per move */
public int iterationLimit;
/** Search depth limit (for e.g. Alpha-Beta experts) */
public int depthLimit;
/** Maximum number of actions per playout which we'll bias using features (-1 for no limit) */
public int maxNumBiasedPlayoutActions;
/** If true, use tournament mode (similar to the one in Polygames) */
public boolean tournamentMode;
/** Epsilon for epsilon-greedy features-based playouts */
public double playoutFeaturesEpsilon;
/** Number of threads to use for Tree Parallelisation in MCTS-based agents */
public int numAgentThreads;
//-------------------------------------------------------------------------
}
| 1,233 | 27.697674 | 95 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/params/FeatureDiscoveryParams.java | package training.expert_iteration.params;
/**
* Wrapper around params for feature discovery settings.
*
* @author Dennis Soemers
*/
public class FeatureDiscoveryParams
{
//-------------------------------------------------------------------------
/** After this many training games, we add a new feature. */
public int addFeatureEvery;
/** If true, we'll not grow feature set (but still train weights) */
public boolean noGrowFeatureSet;
/** At most this number of feature instances will be taken into account when combining features */
public int combiningFeatureInstanceThreshold;
/** Number of threads to use for parallel feature discovery */
public int numFeatureDiscoveryThreads;
/** Critical value used when computing confidence intervals for correlations */
public double criticalValueCorrConf;
/** If true, use a special-moves expander in addition to the normal one */
public boolean useSpecialMovesExpander;
/** If true, use a special-moves expander in addition to the normal one, but split time with the normal one (so same number of total features) */
public boolean useSpecialMovesExpanderSplit;
/** Type of feature set expander to use */
public String expanderType;
//-------------------------------------------------------------------------
}
| 1,299 | 31.5 | 146 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/params/GameParams.java | package training.expert_iteration.params;
import java.util.List;
/**
* Wrapper around params for game setup/configuration in training runs.
*
* @author Dennis Soemers
*/
public class GameParams
{
//-------------------------------------------------------------------------
/** Name of the game to play. Should end with .lud */
public String gameName;
/** List of game options to use when compiling game */
public List<String> gameOptions;
/** Name of ruleset to compile. Any options will be ignored if ruleset is provided. */
public String ruleset;
/** Maximum game duration (in moves) */
public int gameLengthCap;
//-------------------------------------------------------------------------
}
| 721 | 23.066667 | 87 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/params/ObjectiveParams.java | package training.expert_iteration.params;
/**
* Wrapper around params for objective function(s) in training runs.
*
* @author Dennis Soemers
*/
public class ObjectiveParams
{
//-------------------------------------------------------------------------
/** If true, we'll train a policy on TSPG objective (see CoG 2019 paper) */
public boolean trainTSPG;
/** If true, we'll use importance sampling weights based on episode durations for CE-loss */
public boolean importanceSamplingEpisodeDurations;
/** If true, we use Weighted Importance Sampling instead of Ordinary Importance Sampling for any of the above */
public boolean weightedImportanceSampling;
/** If true, we don't do any value function learning */
public boolean noValueLearning;
/** If true, we handle move aliasing by putting the maximum mass among all aliased moves on each of them, for training selection policy. */
public boolean handleAliasing;
/** If true, we handle move aliasing by putting the maximum mass among all aliased moves on each of them, for training playout policy. */
public boolean handleAliasingPlayouts;
/** Lambda param for weight decay (~= 2c for L2 regularisation, in absence of momentum) */
public double weightDecayLambda;
//-------------------------------------------------------------------------
}
| 1,331 | 35 | 140 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/params/OptimisersParams.java | package training.expert_iteration.params;
/**
* Wrapper around params for optimisers in training runs.
*
* @author Dennis Soemers
*/
public class OptimisersParams
{
//-------------------------------------------------------------------------
/** Optimiser to use when optimising policy for Selection phase */
public String selectionOptimiserConfig;
/** Optimiser to use when optimising policy for Playout phase */
public String playoutOptimiserConfig;
/** Optimiser to use when optimising the Cross-Entropy Exploration policy */
public String ceExploreOptimiserConfig;
/** Optimiser to use when optimising policy on TSPG objective (see CoG 2019 paper) */
public String tspgOptimiserConfig;
/** Optimiser to use when optimising value function */
public String valueOptimiserConfig;
//-------------------------------------------------------------------------
}
| 890 | 27.741935 | 86 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/params/OutParams.java | package training.expert_iteration.params;
import java.io.File;
/**
* Wrapper around params for output/file writing.
*
* @author Dennis Soemers
*/
public class OutParams
{
//-------------------------------------------------------------------------
/**
* When do we want to store checkpoints of trained weights?
* @author Dennis Soemers
*/
public enum CheckpointTypes
{
/** Store checkpoint after N self-play training games */
Game,
/** Store checkpoint after N weight updates */
WeightUpdate
}
//-------------------------------------------------------------------------
/** Output directory */
public File outDir;
/** When do we store checkpoints of trained weights? */
public CheckpointTypes checkpointType;
/** Frequency of checkpoint updates */
public int checkpointFrequency;
/** If true, we don't write log messages to a log file. */
public boolean noLogging;
//-------------------------------------------------------------------------
}
| 1,002 | 21.795455 | 76 | java |
Ludii | Ludii-master/AI/src/training/expert_iteration/params/TrainingParams.java | package training.expert_iteration.params;
/**
* Wrapper around params for basic training setup/configuration.
*
* @author Dennis Soemers
*/
public class TrainingParams
{
//-------------------------------------------------------------------------
/** Number of training games to run */
public int numTrainingGames;
/** Max size of minibatches in training. */
public int batchSize;
/** Max size of the experience buffer. */
public int experienceBufferSize;
/** After this many moves (decision points) in training games, we update weights. */
public int updateWeightsEvery;
/** If true, we'll use prioritized experience replay */
public boolean prioritizedExperienceReplay;
/** If not null/empty, will try to find a good value function to start with from this directory */
public String initValueFuncDir;
/** Number of epochs to run for policy gradients. */
public int numPolicyGradientEpochs;
/** Number of trials to run per epoch for policy gradients */
public int numTrialsPerPolicyGradientEpoch;
/** Discount factor gamma for policy gradients */
public double pgGamma;
/** Weight for entropy regularisation */
public double entropyRegWeight;
/** Number of threads to use for parallel trials for policy gradients */
public int numPolicyGradientThreads;
/** After running policy gradients, scale obtained weights by this value */
public double postPGWeightScalar;
//-------------------------------------------------------------------------
}
| 1,501 | 27.884615 | 99 | java |