repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.3/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.3-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.5/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.5-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.6/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.6-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.7/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.7-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.8/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.8-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.9.2/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.9.2-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 615 | 28.333333 | 78 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.9.3/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.9.3-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 615 | 28.333333 | 78 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.9.4/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.9.4-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 615 | 28.333333 | 78 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.7.9/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.9-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.0.rc2/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.0.rc2-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 617 | 28.428571 | 80 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.0/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.0.rc2-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 617 | 28.428571 | 80 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.1/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.1-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.10/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.10-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 614 | 28.285714 | 77 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.11.1/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.11.1-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 616 | 28.380952 | 79 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.11.2/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.11.2-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 616 | 28.380952 | 79 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.11.3/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.11.3-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 616 | 28.380952 | 79 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.11/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.11-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 614 | 28.285714 | 77 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.4/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.4-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.5/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.5-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.6/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.6-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.7/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.7-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.8/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.8-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.8.9/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.9-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.9.0/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.9.0-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.9.6/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.9.6-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.9.7/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.9.7-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.9.8/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.9.8-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonDatabind/generated_sources/2.9.9/PackageVersion.java | package com.fasterxml.jackson.databind.cfg;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.9.9-SNAPSHOT", "com.fasterxml.jackson.core", "jackson-databind");
@Override
public Version version() {
return VERSION;
}
}
| 613 | 28.238095 | 76 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.10.0/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.10.0-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 628 | 28.952381 | 89 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.3.2/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.3.2-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 627 | 28.904762 | 88 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.5.1/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.5.1-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 627 | 28.904762 | 88 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.7.0/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.0-rc4-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 631 | 29.095238 | 92 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.7.4/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.4-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 627 | 28.904762 | 88 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.7.7/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.7-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 627 | 28.904762 | 88 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.7.8/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.7.8-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 627 | 28.904762 | 88 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.8.0.rc2/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.0.rc2-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 631 | 29.095238 | 92 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.8.0/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.0.rc2-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 631 | 29.095238 | 92 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.8.5/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.8.5-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 627 | 28.904762 | 88 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.9.6/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.9.6-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 627 | 28.904762 | 88 | java |
defects4j | defects4j-master/framework/projects/JacksonXml/generated_sources/2.9.8/PackageVersion.java | package com.fasterxml.jackson.dataformat.xml;
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.core.Versioned;
import com.fasterxml.jackson.core.util.VersionUtil;
/**
* Automatically generated from PackageVersion.java.in during
* packageVersion-generate execution of maven-replacer-plugin in
* pom.xml.
*/
public final class PackageVersion implements Versioned {
public final static Version VERSION = VersionUtil.parseVersion(
"2.9.8-SNAPSHOT", "com.fasterxml.jackson.dataformat", "jackson-dataformat-xml");
@Override
public Version version() {
return VERSION;
}
}
| 627 | 28.904762 | 88 | java |
defects4j | defects4j-master/framework/test/resources/input/foo/bar/FailingTests.java | package foo.bar;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
public class FailingTests {
@Test
public void test0() {
assertTrue(false);
}
@Test
public void test1() {
assertTrue(false);
}
@Test
public void test2() {
assertTrue(false);
}
}
| 296 | 11.375 | 42 | java |
defects4j | defects4j-master/framework/test/resources/input/foo/bar/InvalidCharacters.java | package foo.bar;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class InvalidCharacters {
@Test
public void test0() {
assertEquals("'" + "" + "' != '" + "#',(-2&)\r%\".#\"-'\f&.+\n3\f)0!%(\"#( -'\n20&!-- +-//\n#$'#3-%)1$(," + "'", "#',(-2&)\r%\".#\"-'\f&.+\n3\f)0!%(\"#( -'\n20&!-- +-//\n#$'#3-%)1$(,");
}
}
| 426 | 29.5 | 265 | java |
defects4j | defects4j-master/framework/test/resources/input/foo/bar/InvalidImport.java | package foo.bar;
import non.existing.classtoimport;
import org.junit.Test;
public class InvalidImport {
@Test
public void dummy() {
// empty
}
}
| 158 | 11.230769 | 34 | java |
defects4j | defects4j-master/framework/test/resources/input/foo/bar/LineCommentsWithWhitespaces.java | package foo.bar;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class LineCommentsWithWhitespaces {
@Test
public void test0() {
String str = "_hello_";
assertEquals(" title=\" title=\"\" alt=\"\"", str); // (Primitive) Original Value: title=" title="" alt="" | Regression Value: title=" title="" alt=""
}
}
| 360 | 23.066667 | 161 | java |
defects4j | defects4j-master/framework/test/resources/input/foo/bar/Regex.java | package foo.bar;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
public class Regex {
@Test
public void test0() {
String str = "\\s*(?:'((?:\\\\'|[^'])*?)'|\"((?:\\\\\"|[^\"])*?)\")\\s*";
assertNotNull(str)
}
}
| 250 | 15.733333 | 77 | java |
defects4j | defects4j-master/framework/test/resources/input/foo/bar/UnitTestsWithCompilationIssues.java | package foo.bar;
import org.junit.Test;
public class UnitTestsWithCompilationIssues {
@Test
public void test0() {
String str = new String();
str = 123456789;
}
@Test
public void test1() {
assertTrue(123456789 == 123456789)
}
}
| 255 | 13.222222 | 45 | java |
defects4j | defects4j-master/framework/test/resources/input/foo/bar/ValidTestClass.java | package foo.bar;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
public class ValidTestClass {
@Test
public void test0() {
String str = new String("123456789");
assertEquals("123456789", str);
}
@Test
public void test1() {
assertTrue(123456789 == 123456789);
}
}
| 355 | 15.952381 | 44 | java |
defects4j | defects4j-master/framework/test/resources/output/foo/bar/FailingTests.java | package foo.bar;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
public class FailingTests {
@Test
public void test0() {}
// Defects4J: flaky method
// @Test
// public void test0() {
// assertTrue(false);
// }
@Test
public void test1() {}
// Defects4J: flaky method
// @Test
// public void test1() {
// assertTrue(false);
// }
@Test
public void test2() {}
// Defects4J: flaky method
// @Test
// public void test2() {
// assertTrue(false);
// }
}
| 512 | 14.545455 | 42 | java |
defects4j | defects4j-master/framework/test/resources/output/foo/bar/InvalidCharacters.java | package foo.bar;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class InvalidCharacters {
@Test
public void test0() {}
// Defects4J: flaky method
// @Test
// public void test0() {
// assertEquals("'" + "" + "' != '" + "#',(-2&)\r%\".#\"-'\f&.+\n3\f)0!%(\"#( -'\n20&!-- +-//\n#$'#3-%)1$(," + "'", "#',(-2&)\r%\".#\"-'\f&.+\n3\f)0!%(\"#( -'\n20&!-- +-//\n#$'#3-%)1$(,");
// }
}
| 498 | 28.352941 | 268 | java |
defects4j | defects4j-master/framework/test/resources/output/foo/bar/LineCommentsWithWhitespaces.java | package foo.bar;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class LineCommentsWithWhitespaces {
@Test
public void test0() {}
// Defects4J: flaky method
// @Test
// public void test0() {
// String str = "_hello_";
// assertEquals(" title=\" title=\"\" alt=\"\"", str); // (Primitive) Original Value: title=" title="" alt="" | Regression Value: title=" title="" alt=""
// }
}
| 435 | 23.222222 | 164 | java |
defects4j | defects4j-master/framework/test/resources/output/foo/bar/Regex.java | package foo.bar;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
public class Regex {
@Test
public void test0() {}
// Defects4J: flaky method
// @Test
// public void test0() {
// String str = "\\s*(?:'((?:\\\\'|[^'])*?)'|\"((?:\\\\\"|[^\"])*?)\")\\s*";
// assertNotNull(str)
// }
}
| 325 | 17.111111 | 80 | java |
defects4j | defects4j-master/framework/test/resources/output/foo/bar/UnitTestsWithCompilationIssues.java | package foo.bar;
import org.junit.Test;
public class UnitTestsWithCompilationIssues {
@Test
public void test0() {}
// Defects4J: flaky method
// @Test
// public void test0() {
// String str = new String();
// str = 123456789;
// }
@Test
public void test1() {}
// Defects4J: flaky method
// @Test
// public void test1() {
// assertTrue(123456789 == 123456789)
// }
}
| 402 | 15.791667 | 45 | java |
defects4j | defects4j-master/framework/test/resources/output/foo/bar/ValidTestClass.java | package foo.bar;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
public class ValidTestClass {
@Test
public void test0() {
String str = new String("123456789");
assertEquals("123456789", str);
}
@Test
public void test1() {
assertTrue(123456789 == 123456789);
}
}
| 355 | 15.952381 | 44 | java |
null | tesseract-main/java/com/google/scrollview/ScrollView.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview;
import com.google.scrollview.events.SVEvent;
import com.google.scrollview.ui.SVImageHandler;
import com.google.scrollview.ui.SVWindow;
import org.piccolo2d.nodes.PImage;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.ArrayList;
import java.util.regex.Pattern;
/**
* The ScrollView class is the main class which gets started from the command
* line. It sets up LUA and handles the network processing.
* @author [email protected]
*/
public class ScrollView {
/** The port our server listens at. */
public static int SERVER_PORT = 8461;
/**
* All SVWindow objects share the same connection stream. The socket is needed
* to detect when the connection got closed, in/out are used to send and
* receive messages.
*/
private static Socket socket;
private static PrintStream out;
public static BufferedReader in;
public static float polylineXCoords[]; // The coords being received.
public static float polylineYCoords[]; // The coords being received.
public static int polylineSize; // The size of the coords arrays.
public static int polylineScanned; // The size read so far.
private static ArrayList<SVWindow> windows; // The id to SVWindow map.
private static Pattern intPattern; // For checking integer arguments.
private static Pattern floatPattern; // For checking float arguments.
/** Keeps track of the number of messages received. */
static int nrInputLines = 0;
/** Prints all received messages to the console if true. */
static boolean debugViewNetworkTraffic = false;
/** Add a new message to the outgoing queue */
public static void addMessage(SVEvent e) {
if (debugViewNetworkTraffic) {
System.out.println("(S->c) " + e.toString());
}
String str = e.toString();
// Send the whole thing as UTF8.
try {
byte [] utf8 = str.getBytes("UTF8");
out.write(utf8, 0, utf8.length);
} catch (java.io.UnsupportedEncodingException ex) {
System.out.println("Oops... can't encode to UTF8... Exiting");
System.exit(0);
}
out.println();
// Flush the output and check for errors.
boolean error = out.checkError();
if (error) {
System.out.println("Connection error. Quitting ScrollView Server...");
System.exit(0);
}
}
/** Read one message from client (assuming there are any). */
public static String receiveMessage() throws IOException {
return in.readLine();
}
/**
* The main program loop. Basically loops through receiving messages and
* processing them and then sending messages (if there are any).
*/
private static void IOLoop() {
String inputLine;
try {
while (!socket.isClosed() && !socket.isInputShutdown() &&
!socket.isOutputShutdown() &&
socket.isConnected() && socket.isBound()) {
inputLine = receiveMessage();
if (inputLine == null) {
// End of stream reached.
break;
}
nrInputLines++;
if (debugViewNetworkTraffic) {
System.out.println("(c->S," + nrInputLines + ")" + inputLine);
}
if (polylineSize > polylineScanned) {
// We are processing a polyline.
// Read pairs of coordinates separated by commas.
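// Illustrative example (coordinate values assumed, not from the original
// source): an input line "10,20,30,40" stores the points (10,20) and
// (30,40) into polylineXCoords/polylineYCoords.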
boolean first = true;
for (String coordStr : inputLine.split(",")) {
int coord = Integer.parseInt(coordStr);
if (first) {
polylineXCoords[polylineScanned] = coord;
} else {
polylineYCoords[polylineScanned++] = coord;
}
first = !first;
}
assert first;
} else {
// Process this normally.
processInput(inputLine);
}
}
}
// Some connection error
catch (IOException e) {
System.out.println("Connection error. Quitting ScrollView Server...");
}
System.exit(0);
}
// Parse a comma-separated list of arguments into ArrayLists of the
// possible types. Each type is stored in order, but the order
// distinction between types is lost.
// Note that the format is highly constrained to what the client used
// to send to LUA:
// Quoted string -> String.
// true or false -> Boolean.
// %f format number -> Float (no %e allowed)
// Sequence of digits -> Integer
// Nothing else allowed.
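// Illustrative example (argument values assumed): parsing the list
//   'multi, word',42,3.14,true
// yields stringList = ["multi, word"], intList = [42], floatList = [3.14]
// and boolList = [true]; the relative order across the type lists is lost.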
private static void parseArguments(String argList,
ArrayList<Integer> intList,
ArrayList<Float> floatList,
ArrayList<String> stringList,
ArrayList<Boolean> boolList) {
// str is only non-null if an argument starts with a single or double
// quote. str is set back to null on completion of the string with a
// matching quote. If the string contains a comma then str will stay
// non-null across multiple argStr values until a matching closing quote.
// Backslash escaped quotes do not count as terminating the string.
String str = null;
for (String argStr : argList.split(",")) {
if (str != null) {
// Last string was incomplete. Append argStr to it and restore comma.
// Execute str += "," + argStr in Java.
int length = str.length() + 1 + argStr.length();
StringBuilder appended = new StringBuilder(length);
appended.append(str);
appended.append(",");
appended.append(argStr);
str = appended.toString();
} else if (argStr.length() == 0) {
continue;
} else {
char quote = argStr.charAt(0);
// If it begins with a quote then it is a string, but may not
// end this time if it contained a comma.
if (quote == '\'' || quote == '"') {
str = argStr;
}
}
if (str != null) {
// It began with a quote. Check that it still does.
assert str.charAt(0) == '\'' || str.charAt(0) == '"';
int len = str.length();
if (len > 1 && str.charAt(len - 1) == str.charAt(0)) {
// We have an ending quote of the right type. Now check that
// it is not escaped. Must have an even number of slashes before.
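// Illustrative example (strings assumed): in  'abc\'  the closing quote is
// preceded by one backslash (odd count), so it is escaped and the string is
// still incomplete; in  'abc\\'  two backslashes (even count) precede it, so
// the string is complete.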
int slash = len - 1;
while (slash > 0 && str.charAt(slash - 1) == '\\')
--slash;
if ((len - 1 - slash) % 2 == 0) {
// It is now complete. Chop off the quotes and save.
// TODO(rays) remove the first backslash of each pair.
stringList.add(str.substring(1, len - 1));
str = null;
}
}
// If str is not null here, then we have a string with a comma in it.
// Append , and the next argument at the next iteration, but check
// that str is null after the loop terminates in case it was an
// unterminated string.
} else if (floatPattern.matcher(argStr).matches()) {
// It is a float.
floatList.add(Float.parseFloat(argStr));
} else if (argStr.equals("true")) {
boolList.add(true);
} else if (argStr.equals("false")) {
boolList.add(false);
} else if (intPattern.matcher(argStr).matches()) {
// Only contains digits so must be an int.
intList.add(Integer.parseInt(argStr));
}
// else ignore all incompatible arguments for forward compatibility.
}
// All strings must have been terminated.
assert str == null;
}
/** Executes the LUA command parsed as parameter. */
private static void processInput(String inputLine) {
if (inputLine == null) {
return;
}
// Execute a function encoded as a LUA statement! Yuk!
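// Illustrative examples (window id and arguments assumed): a method call
// arrives as "w3:drawLine(10,10,100,100)", while window creation arrives as
// "w3 = luajava.newInstance(...)" with its arguments in the parentheses.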
if (inputLine.charAt(0) == 'w') {
// This is a method call on a window. Parse it.
String noWLine = inputLine.substring(1);
String[] idStrs = noWLine.split("[ :]", 2);
int windowID = Integer.parseInt(idStrs[0]);
// Find the parentheses.
int start = inputLine.indexOf('(');
int end = inputLine.lastIndexOf(')');
// Parse the args.
ArrayList<Integer> intList = new ArrayList<Integer>(4);
ArrayList<Float> floatList = new ArrayList<Float>(2);
ArrayList<String> stringList = new ArrayList<String>(4);
ArrayList<Boolean> boolList = new ArrayList<Boolean>(3);
parseArguments(inputLine.substring(start + 1, end),
intList, floatList, stringList, boolList);
int colon = inputLine.indexOf(':');
if (colon > 1 && colon < start) {
// This is a regular function call. Look for the name and call it.
String func = inputLine.substring(colon + 1, start);
if (func.equals("drawLine")) {
windows.get(windowID).drawLine(intList.get(0), intList.get(1),
intList.get(2), intList.get(3));
} else if (func.equals("createPolyline")) {
windows.get(windowID).createPolyline(intList.get(0));
} else if (func.equals("drawPolyline")) {
windows.get(windowID).drawPolyline();
} else if (func.equals("drawRectangle")) {
windows.get(windowID).drawRectangle(intList.get(0), intList.get(1),
intList.get(2), intList.get(3));
} else if (func.equals("setVisible")) {
windows.get(windowID).setVisible(boolList.get(0));
} else if (func.equals("setAlwaysOnTop")) {
windows.get(windowID).setAlwaysOnTop(boolList.get(0));
} else if (func.equals("addMessage")) {
windows.get(windowID).addMessage(stringList.get(0));
} else if (func.equals("addMessageBox")) {
windows.get(windowID).addMessageBox();
} else if (func.equals("clear")) {
windows.get(windowID).clear();
} else if (func.equals("setStrokeWidth")) {
windows.get(windowID).setStrokeWidth(floatList.get(0));
} else if (func.equals("drawEllipse")) {
windows.get(windowID).drawEllipse(intList.get(0), intList.get(1),
intList.get(2), intList.get(3));
} else if (func.equals("pen")) {
if (intList.size() == 4) {
windows.get(windowID).pen(intList.get(0), intList.get(1),
intList.get(2), intList.get(3));
} else {
windows.get(windowID).pen(intList.get(0), intList.get(1),
intList.get(2));
}
} else if (func.equals("brush")) {
if (intList.size() == 4) {
windows.get(windowID).brush(intList.get(0), intList.get(1),
intList.get(2), intList.get(3));
} else {
windows.get(windowID).brush(intList.get(0), intList.get(1),
intList.get(2));
}
} else if (func.equals("textAttributes")) {
windows.get(windowID).textAttributes(stringList.get(0),
intList.get(0),
boolList.get(0),
boolList.get(1),
boolList.get(2));
} else if (func.equals("drawText")) {
windows.get(windowID).drawText(intList.get(0), intList.get(1),
stringList.get(0));
} else if (func.equals("addMenuBarItem")) {
if (boolList.size() > 0) {
windows.get(windowID).addMenuBarItem(stringList.get(0),
stringList.get(1),
intList.get(0),
boolList.get(0));
} else if (intList.size() > 0) {
windows.get(windowID).addMenuBarItem(stringList.get(0),
stringList.get(1),
intList.get(0));
} else {
windows.get(windowID).addMenuBarItem(stringList.get(0),
stringList.get(1));
}
} else if (func.equals("addPopupMenuItem")) {
if (stringList.size() == 4) {
windows.get(windowID).addPopupMenuItem(stringList.get(0),
stringList.get(1),
intList.get(0),
stringList.get(2),
stringList.get(3));
} else {
windows.get(windowID).addPopupMenuItem(stringList.get(0),
stringList.get(1));
}
} else if (func.equals("update")) {
windows.get(windowID).update();
} else if (func.equals("showInputDialog")) {
windows.get(windowID).showInputDialog(stringList.get(0));
} else if (func.equals("showYesNoDialog")) {
windows.get(windowID).showYesNoDialog(stringList.get(0));
} else if (func.equals("zoomRectangle")) {
windows.get(windowID).zoomRectangle(intList.get(0), intList.get(1),
intList.get(2), intList.get(3));
} else if (func.equals("readImage")) {
PImage image = SVImageHandler.readImage(intList.get(2), in);
windows.get(windowID).drawImage(image, intList.get(0), intList.get(1));
} else if (func.equals("drawImage")) {
PImage image = new PImage(stringList.get(0));
windows.get(windowID).drawImage(image, intList.get(0), intList.get(1));
} else if (func.equals("destroy")) {
windows.get(windowID).destroy();
}
// else for forward compatibility purposes, silently ignore any
// unrecognized function call.
} else {
// No colon. Check for create window.
if (idStrs[1].startsWith("= luajava.newInstance")) {
while (windows.size() <= windowID) {
windows.add(null);
}
windows.set(windowID, new SVWindow(stringList.get(1),
intList.get(0), intList.get(1),
intList.get(2), intList.get(3),
intList.get(4), intList.get(5),
intList.get(6)));
}
// else for forward compatibility purposes, silently ignore any
// unrecognized function call.
}
} else if (inputLine.startsWith("svmain")) {
// Startup or end. Startup is a lua bind, which is now a no-op.
if (inputLine.startsWith("svmain:exit")) {
exit();
}
// else for forward compatibility purposes, silently ignore any
// unrecognized function call.
}
// else for forward compatibility purposes, silently ignore any
// unrecognized function call.
}
/** Called from the client to make the server exit. */
public static void exit() {
System.exit(0);
}
/**
* The main function. Sets up LUA and the server connection and then calls the
* IOLoop.
*/
public static void main(String[] args) {
if (args.length > 0) {
SERVER_PORT = Integer.parseInt(args[0]);
}
windows = new ArrayList<SVWindow>(100);
intPattern = Pattern.compile("[0-9-][0-9]*");
floatPattern = Pattern.compile("[0-9-][0-9]*\\.[0-9]*");
// Open a socket to listen on.
try (ServerSocket serverSocket = new ServerSocket(SERVER_PORT)) {
System.out.println("Socket started on port " + SERVER_PORT);
// Wait (blocking) for an incoming connection
socket = serverSocket.accept();
System.out.println("Client connected");
// Setup the streams
out = new PrintStream(socket.getOutputStream(), true, "UTF-8");
in =
new BufferedReader(new InputStreamReader(socket.getInputStream(),
"UTF8"));
} catch (IOException e) {
// Something went wrong and we were unable to set up a connection.
// This is pretty much a fatal error.
// Note: The server does not get restarted automatically if this happens.
e.printStackTrace();
System.exit(1);
}
// Enter the main program loop.
IOLoop();
}
}
| 17,176 | 41.517327 | 81 | java |
null | tesseract-main/java/com/google/scrollview/events/SVEvent.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.events;
import com.google.scrollview.ui.SVWindow;
/**
* The SVEvent is a structure which holds the actual values of a message to be
* transmitted. It corresponds to the client structure defined in scrollview.h
*
* @author [email protected]
*/
public class SVEvent {
SVEventType type; // What kind of event.
SVWindow window; // Window event relates to.
int x; // Coords of click or selection.
int y;
int xSize; // Size of selection.
int ySize;
int commandId;
String parameter; // Any string that might have been passed as argument.
/**
* A "normal" SVEvent.
*
* @param t The type of the event as specified in SVEventType (e.g.
* SVET_CLICK)
* @param w The window the event corresponds to
* @param x1 X position of the mouse at the time of the event
* @param y1 Y position of the mouse at the time of the event
* @param x2 X selection size at the time of the event
* @param y2 Y selection size at the time of the event
* @param p A parameter associated with the event (e.g. keyboard input)
*/
public SVEvent(SVEventType t, SVWindow w, int x1, int y1, int x2, int y2,
String p) {
type = t;
window = w;
x = x1;
y = y1;
xSize = x2;
ySize = y2;
commandId = 0;
parameter = p;
}
/**
* An event which issues a command (like clicking on a item in the menubar).
*
* @param eventtype The type of the event as specified in SVEventType
* (usually SVET_MENU or SVET_POPUP)
* @param svWindow The window the event corresponds to
* @param commandid The associated id with the command (given by the client
* on construction of the item)
* @param value A parameter associated with the event (e.g. keyboard input)
*/
public SVEvent(SVEventType eventtype, SVWindow svWindow, int commandid,
String value) {
type = eventtype;
window = svWindow;
parameter = value;
x = 0;
y = 0;
xSize = 0;
ySize = 0;
commandId = commandid;
}
/**
* This is the string representation of the message, which is what will
* actually be transferred over the network.
*/
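// Illustrative example (field values assumed): an event in a window with hash
// 1, type ordinal 0, position (100,200), no selection size, no command id and
// no parameter serializes as "1,0,100,200,0,0,0,null".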
@Override
public String toString() {
return (window.hash + "," + type.ordinal() + "," + x + "," + y + ","
+ xSize + "," + ySize + "," + commandId + "," + parameter);
}
}
| 2,941 | 32.431818 | 80 | java |
null | tesseract-main/java/com/google/scrollview/events/SVEventHandler.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.events;
import com.google.scrollview.ScrollView;
import com.google.scrollview.events.SVEvent;
import com.google.scrollview.events.SVEventType;
import com.google.scrollview.ui.SVWindow;
import org.piccolo2d.PCamera;
import org.piccolo2d.PNode;
import org.piccolo2d.event.PBasicInputEventHandler;
import org.piccolo2d.event.PInputEvent;
import org.piccolo2d.nodes.PPath;
import java.awt.Color;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.KeyEvent;
import java.awt.event.KeyListener;
import java.awt.event.WindowEvent;
import java.awt.event.WindowListener;
import java.awt.Window;
import javax.swing.Timer;
/**
* The ScrollViewEventHandler takes care of any events which might happen on the
* canvas and converts them to a corresponding SVEvent, which is then (using the
* processEvent method) added to a message queue. All events from the
* message queue get sent to the client gradually.
*
* @author [email protected]
*/
public class SVEventHandler extends PBasicInputEventHandler implements
ActionListener, KeyListener, WindowListener {
/** Necessary to wait for a defined period of time (for SVET_HOVER). */
public Timer timer;
/** The window which the event corresponds to. */
private SVWindow svWindow;
/** These are used to determine a selection size (for SVET_SELECTION). */
private int lastX = 0;
private int lastY = 0;
/**
* These are used in case we want to transmit our position but do not get it
* because the event was not a MouseEvent, in particular for SVET_HOVER and
* SVET_INPUT.
*/
private int lastXMove = 0;
private int lastYMove = 0;
/** For Drawing a rubber-band rectangle for selection */
private int startX = 0;
private int startY = 0;
private float rubberBandTransparency = 0.5f;
private PNode selection = null;
/** The string entered since the last enter. Since the client
* end eats all newlines, we can't use the newline
* character, so use ! for now, as it cannot be entered
* directly anyway and therefore can never show up for real. */
private String keyStr = "!";
/** Setup the timer. */
public SVEventHandler(SVWindow wdw) {
timer = new Timer(1000, this);
svWindow = wdw;
}
/**
* Store the newest x,y values, add the message to the queue and restart the
* timer.
*/
private void processEvent(SVEvent e) {
lastXMove = e.x;
lastYMove = e.y;
ScrollView.addMessage(e);
timer.restart();
}
/** Show the associated popup menu at (x,y) (relative position of the window). */
private void showPopup(PInputEvent e) {
double x = e.getCanvasPosition().getX();
double y = e.getCanvasPosition().getY();
if (svWindow.svPuMenu != null) {
svWindow.svPuMenu.show(svWindow, (int) x, (int) y);
}
}
/** The mouse is clicked - create an SVET_CLICK event. */
@Override
public void mouseClicked(PInputEvent e) {
if (e.isPopupTrigger()) {
showPopup(e);
} else {
processEvent(new SVEvent(SVEventType.SVET_CLICK, svWindow, (int) e
.getPosition().getX(), (int) e.getPosition().getY(), 0, 0, null));
}
}
/**
* A mouse button was pressed (and is being held down).
* Depending on the OS, show a popup menu (if the button pressed is associated
* with popup menus, like the RMB under windows&linux) or otherwise save the
* position (in case it is a selection).
*/
@Override
public void mousePressed(PInputEvent e) {
if (e.isPopupTrigger()) {
showPopup(e);
} else {
lastX = (int) e.getPosition().getX();
lastY = (int) e.getPosition().getY();
timer.restart();
}
}
/** The mouse is getting dragged - create an SVET_MOUSE event. */
@Override
public void mouseDragged(PInputEvent e) {
processEvent(new SVEvent(SVEventType.SVET_MOUSE, svWindow, (int) e
.getPosition().getX(), (int) e.getPosition().getY(), (int) e
.getPosition().getX()
- lastX, (int) e.getPosition().getY() - lastY, null));
// Paint a selection rectangle.
if (selection == null) {
startX = (int) e.getPosition().getX();
startY = (int) e.getPosition().getY();
selection = PPath.createRectangle(startX, startY, 1, 1);
selection.setTransparency(rubberBandTransparency);
svWindow.canvas.getLayer().addChild(selection);
} else {
int right = Math.max(startX, (int) e.getPosition().getX());
int left = Math.min(startX, (int) e.getPosition().getX());
int bottom = Math.max(startY, (int) e.getPosition().getY());
int top = Math.min(startY, (int) e.getPosition().getY());
svWindow.canvas.getLayer().removeChild(selection);
selection = PPath.createRectangle(left, top, right - left, bottom - top);
selection.setPaint(Color.YELLOW);
selection.setTransparency(rubberBandTransparency);
svWindow.canvas.getLayer().addChild(selection);
}
}
/**
* The mouse was released.
* Depending on the OS, show a popup menu (if the button pressed is associated
* with popup menus, like the RMB under windows&linux) or otherwise create an
* SVET_SELECTION event.
*/
@Override
public void mouseReleased(PInputEvent e) {
if (e.isPopupTrigger()) {
showPopup(e);
} else {
processEvent(new SVEvent(SVEventType.SVET_SELECTION, svWindow, (int) e
.getPosition().getX(), (int) e.getPosition().getY(), (int) e
.getPosition().getX()
- lastX, (int) e.getPosition().getY() - lastY, null));
}
if (selection != null) {
svWindow.canvas.getLayer().removeChild(selection);
selection = null;
}
}
/**
* The mouse wheel is used to zoom in and out of the viewport and center on
* the (x,y) position the mouse is currently on.
*/
@Override
public void mouseWheelRotated(PInputEvent e) {
PCamera lc = svWindow.canvas.getCamera();
double sf = SVWindow.SCALING_FACTOR;
if (e.getWheelRotation() < 0) {
sf = 1 / sf;
}
lc.scaleViewAboutPoint(lc.getScale() / sf, e.getPosition().getX(), e
.getPosition().getY());
}
/**
* The mouse was moved - create an SVET_MOTION event. NOTE: This obviously
* creates a lot of traffic and, depending on the type of application, could
* quite possibly be disabled.
*/
@Override
public void mouseMoved(PInputEvent e) {
processEvent(new SVEvent(SVEventType.SVET_MOTION, svWindow, (int) e
.getPosition().getX(), (int) e.getPosition().getY(), 0, 0, null));
}
/**
* The mouse entered the window.
* Start the timer, which will then emit SVET_HOVER events every X ms. */
@Override
public void mouseEntered(PInputEvent e) {
timer.restart();
}
  /**
   * The mouse exited the window.
   * Stop the timer, so no more SVET_HOVER events will be emitted. */
@Override
public void mouseExited(PInputEvent e) {
timer.stop();
}
  /**
   * The only object associated with this handler is the timer, so we use it to
   * send an SVET_HOVER event.
   */
public void actionPerformed(ActionEvent e) {
processEvent(new SVEvent(SVEventType.SVET_HOVER, svWindow, lastXMove,
lastYMove, 0, 0, null));
}
/**
* A key was pressed - create an SVET_INPUT event.
*
* NOTE: Might be useful to specify hotkeys.
*
* Implementation note: The keyListener provided by Piccolo seems to be
* broken, so we use the AWT listener directly.
   * There are never any keyTyped events received either, so we are
   * stuck with physical keys, which is very ugly.
*/
public void keyPressed(KeyEvent e) {
char keyCh = e.getKeyChar();
if (keyCh == '\r' || keyCh == '\n' || keyCh == '\0' || keyCh == '?') {
processEvent(new SVEvent(SVEventType.SVET_INPUT, svWindow, lastXMove,
lastYMove, 0, 0, keyStr));
      // Send newline characters as '!' since '!' can never be a key press
      // and the client eats all newline characters.
keyStr = "!";
} else {
processEvent(new SVEvent(SVEventType.SVET_INPUT, svWindow, lastXMove,
lastYMove, 0, 0, String.valueOf(keyCh)));
keyStr += keyCh;
}
}
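  // Worked example of the '!' protocol above (key sequence is illustrative):
  // starting from keyStr == "!", pressing 'a', 'b' and then Enter sends
  // SVET_INPUT "a", SVET_INPUT "b" and finally SVET_INPUT "!ab", after which
  // keyStr is reset to "!" for the next line of input.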
/**
* A window is closed (by the 'x') - create an SVET_DESTROY event. If it was
* the last open Window, also send an SVET_EXIT event (but do not exit unless
* the client says so).
*/
public void windowClosing(WindowEvent e) {
processEvent(new SVEvent(SVEventType.SVET_DESTROY, svWindow, lastXMove,
lastYMove, 0, 0, null));
Window w = e.getWindow();
if (w != null) {
w.dispose();
}
SVWindow.nrWindows--;
if (SVWindow.nrWindows == 0) {
processEvent(new SVEvent(SVEventType.SVET_EXIT, svWindow, lastXMove,
lastYMove, 0, 0, null));
}
}
  /** These are all events we do not care about and simply ignore. */
public void keyReleased(KeyEvent e) {
}
public void keyTyped(KeyEvent e) {
}
public void windowActivated(WindowEvent e) {
}
public void windowClosed(WindowEvent e) {
}
public void windowDeactivated(WindowEvent e) {
}
public void windowDeiconified(WindowEvent e) {
}
public void windowIconified(WindowEvent e) {
}
public void windowOpened(WindowEvent e) {
}
}
| 9,822 | 31.52649 | 83 | java |
null | tesseract-main/java/com/google/scrollview/events/SVEventType.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.events;
/**
* These are the defined events which can happen in ScrollView and be
 * transferred to the client. They are the same events as on the client-side
 * part of ScrollView (defined in ScrollView.h).
*
* @author [email protected]
*/
public enum SVEventType {
SVET_DESTROY, // Window has been destroyed by user.
SVET_EXIT, // User has destroyed the last window by clicking on the 'X'
SVET_CLICK, // Any button pressed that is not a popup trigger.
SVET_SELECTION, // Left button selection.
SVET_INPUT, // Any kind of input
SVET_MOUSE, // The mouse has moved with a button pressed.
SVET_MOTION, // The mouse has moved with no button pressed.
SVET_HOVER, // The mouse has stayed still for a second.
SVET_POPUP, // A command selected through a popup menu
SVET_MENU; // A command selected through the menubar
}
| 1,456 | 44.53125 | 80 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVAbstractMenuItem.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
/**
* A MenuListItem is any sort of menu entry. This can either be within a popup
* menu or within a menubar. It can either be a submenu (only name and
* command-id) or a name with an associated value and possibly description. They
* can also have new entries added (if they are submenus).
*
* @author [email protected]
*/
import com.google.scrollview.events.SVEventType;
import javax.swing.JMenu;
import javax.swing.JMenuItem;
abstract class SVAbstractMenuItem {
JMenuItem mi;
public String name;
public int id;
/**
* Sets the basic attributes for name, id and the corresponding swing item
*/
SVAbstractMenuItem(int id, String name, JMenuItem jmi) {
this.mi = jmi;
this.name = name;
this.id = id;
}
/** Returns the actual value of the MenuListItem. */
public String getValue() { return null; }
/** Adds a child entry to the submenu. */
public void add(SVAbstractMenuItem mli) { }
/** Adds a child menu to the submenu (or root node). */
public void add(JMenu jli) { }
/**
* What to do when user clicks on this item.
   * @param window The window in which the event happened.
* @param eventType What kind of event will be associated
* (usually SVET_POPUP or SVET_MENU).
*/
public void performAction(SVWindow window, SVEventType eventType) {}
}
| 1,935 | 32.37931 | 80 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVCheckboxMenuItem.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
/**
* A MenuListItem is any sort of menu entry. This can either be within a popup
* menu or within a menubar. It can either be a submenu (only name and
* command-id) or a name with an associated value and possibly description. They
* can also have new entries added (if they are submenus).
*
* @author [email protected]
*/
import com.google.scrollview.ScrollView;
import com.google.scrollview.events.SVEvent;
import com.google.scrollview.events.SVEventType;
import javax.swing.JCheckBoxMenuItem;
/**
* Constructs a new menulistitem which possesses a flag that can be toggled.
*/
class SVCheckboxMenuItem extends SVAbstractMenuItem {
public boolean bvalue;
SVCheckboxMenuItem(int id, String name, boolean val) {
super(id, name, new JCheckBoxMenuItem(name, val));
bvalue = val;
}
/** What to do when user clicks on this item. */
@Override
public void performAction(SVWindow window, SVEventType eventType) {
    // Checkbox entry - toggle the flag and send an event.
    bvalue = !bvalue;
SVEvent svme = new SVEvent(eventType, window, id, getValue());
ScrollView.addMessage(svme);
}
/** Returns the actual value of the MenuListItem. */
@Override
public String getValue() {
return Boolean.toString(bvalue);
}
}
| 1,939 | 32.448276 | 80 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVEmptyMenuItem.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
/**
* A MenuListItem is any sort of menu entry. This can either be within a popup
* menu or within a menubar. It can either be a submenu (only name and
* command-id) or a name with an associated value and possibly description. They
* can also have new entries added (if they are submenus).
*
* @author [email protected]
*/
import com.google.scrollview.ScrollView;
import com.google.scrollview.events.SVEvent;
import com.google.scrollview.events.SVEventType;
import javax.swing.JMenuItem;
/**
* Constructs a new menulistitem which just has an ID and a name attached to
* it. In this case, we will have to ask for the value of the item and its
* description if it gets called.
*/
class SVEmptyMenuItem extends SVAbstractMenuItem {
SVEmptyMenuItem(int id, String name) {
super(id, name, new JMenuItem(name));
}
/** What to do when user clicks on this item. */
@Override
public void performAction(SVWindow window, SVEventType eventType) {
// Send an event indicating that someone clicked on an entry.
// Value will be null here.
SVEvent svme =
new SVEvent(eventType, window, id, getValue());
ScrollView.addMessage(svme);
}
}
| 1,799 | 37.297872 | 80 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVImageHandler.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
import org.piccolo2d.nodes.PImage;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import javax.imageio.ImageIO;
import javax.xml.bind.DatatypeConverter;
/**
* The ScrollViewImageHandler is a helper class which takes care of image
* processing. It is used to construct an Image from the message-stream and
* basically consists of a number of utility functions to process the input
* stream.
*
* @author [email protected]
*/
public class SVImageHandler {
  /* All methods are static, so we forbid constructing SVImageHandler objects. */
private SVImageHandler() {
}
/**
* Reads size bytes from the stream in and interprets it as an image file,
* encoded as png, and then text-encoded as base 64, returning the decoded
* bitmap.
*
* @param size The size of the image file.
* @param in The input stream from which to read the bytes.
*/
public static PImage readImage(int size, BufferedReader in) {
char[] charbuffer = new char[size];
int numRead = 0;
while (numRead < size) {
int newRead = -1;
try {
newRead = in.read(charbuffer, numRead, size - numRead);
} catch (IOException e) {
System.out.println("Failed to read image data from socket:" + e.getMessage());
return null;
}
if (newRead < 0) {
return null;
}
numRead += newRead;
}
if (numRead != size) {
System.out.println("Failed to read image data from socket");
return null;
}
// Convert the character data to binary.
byte[] binarydata = DatatypeConverter.parseBase64Binary(new String(charbuffer));
// Convert the binary data to a byte stream and parse to image.
ByteArrayInputStream byteStream = new ByteArrayInputStream(binarydata);
try {
PImage img = new PImage(ImageIO.read(byteStream));
return img;
} catch (IOException e) {
System.out.println("Failed to decode image data from socket" + e.getMessage());
}
return null;
}
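  /**
   * Minimal usage sketch (illustrative only; the announced size and the
   * target layer are assumptions, not part of this class): read an image the
   * client announced as 1234 base64 characters and add it to a Piccolo layer.
   */
  private static void readImageDemo(BufferedReader in, org.piccolo2d.PLayer layer) {
    PImage img = readImage(1234, in);
    if (img != null) {
      layer.addChild(img);
    }
  }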
}
| 2,658 | 34.453333 | 86 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVMenuBar.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
import com.google.scrollview.events.SVEventType;
import com.google.scrollview.ui.SVWindow;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.util.HashMap;
import javax.swing.JMenu;
import javax.swing.JMenuBar;
/**
* The SVMenuBar class provides the functionality to add a menubar to
* ScrollView. Each menubar item gets associated with a (client-defined)
* command-id, which SVMenuBar will return upon clicking it.
*
* @author [email protected]
*
*/
public class SVMenuBar implements ActionListener {
/** The root entry to add items to. */
private JMenuBar root;
/** Contains a map of item name to its actual entry. */
private HashMap<String, SVAbstractMenuItem> items;
/** The window the menubar belongs to. */
private SVWindow svWindow;
/**
* Create a new SVMenuBar and place it at the top of the ScrollView window.
*
* @param scrollView The window our menubar belongs to.
*/
public SVMenuBar(SVWindow scrollView) {
root = new JMenuBar();
svWindow = scrollView;
items = new HashMap<String, SVAbstractMenuItem>();
svWindow.setJMenuBar(root);
}
/**
* A click on one of the items in our menubar has occurred. Forward it
* to the item itself to let it decide what happens.
*/
public void actionPerformed(ActionEvent e) {
// Get the corresponding menuitem.
SVAbstractMenuItem svm = items.get(e.getActionCommand());
svm.performAction(svWindow, SVEventType.SVET_MENU);
}
/**
* Add a new entry to the menubar.
*
* @param parent The menu we add our new entry to (should have been defined
* before). If the parent is "", we will add the entry to the root
* (top-level)
* @param name The caption of the new entry.
* @param id The Id of the new entry. If it is -1, the entry will be treated
* as a menu.
*/
public void add(String parent, String name, int id) {
    // A duplicate entry - we just throw it away, since it's already in.
if (items.get(name) != null) { return; }
// A new submenu at the top-level
if (parent.equals("")) {
JMenu jli = new JMenu(name);
SVAbstractMenuItem mli = new SVSubMenuItem(name, jli);
items.put(name, mli);
root.add(jli);
}
// A new sub-submenu
else if (id == -1) {
SVAbstractMenuItem jmi = items.get(parent);
JMenu jli = new JMenu(name);
SVAbstractMenuItem mli = new SVSubMenuItem(name, jli);
items.put(name, mli);
jmi.add(jli);
}
// A new child entry. Add to appropriate parent.
else {
SVAbstractMenuItem jmi = items.get(parent);
if (jmi == null) {
System.out.println("ERROR: Unknown parent " + parent);
System.exit(1);
}
SVAbstractMenuItem mli = new SVEmptyMenuItem(id, name);
mli.mi.addActionListener(this);
items.put(name, mli);
jmi.add(mli);
}
}
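  /**
   * Usage sketch (menu names and the command id are illustrative assumptions):
   * builds a top-level "File" menu and one clickable child entry, following
   * the parent/"" convention documented above.
   */
  private void menuDemo() {
    add("", "File", -1); // top-level menu; the id is ignored for this branch
    add("File", "Open", 1); // child entry that reports command id 1 on click
  }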
/**
* Add a new checkbox entry to the menubar.
*
* @param parent The menu we add our new entry to (should have been defined
* before). If the parent is "", we will add the entry to the root
* (top-level)
* @param name The caption of the new entry.
* @param id The Id of the new entry. If it is -1, the entry will be treated
* as a menu.
   * @param b Whether the entry is initially checked.
*
*/
public void add(String parent, String name, int id, boolean b) {
SVAbstractMenuItem jmi = items.get(parent);
if (jmi == null) {
System.out.println("ERROR: Unknown parent " + parent);
System.exit(1);
}
SVAbstractMenuItem mli = new SVCheckboxMenuItem(id, name, b);
mli.mi.addActionListener(this);
items.put(name, mli);
jmi.add(mli);
}
}
| 4,362 | 32.305344 | 80 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVMenuItem.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
/**
* A MenuListItem is any sort of menu entry. This can either be within a popup
* menu or within a menubar. It can either be a submenu (only name and
* command-id) or a name with an associated value and possibly description. They
* can also have new entries added (if they are submenus).
*
* @author [email protected]
*/
import com.google.scrollview.events.SVEventType;
import javax.swing.JMenuItem;
/**
* Constructs a new menulistitem which also has a value and a description. For
* these, we will not have to ask the server what the value is when the user
* wants to change it, but can just call the client with the new value.
*/
class SVMenuItem extends SVAbstractMenuItem {
public String value = null;
public String desc = null;
SVMenuItem(int id, String name, String v, String d) {
super(id, name, new JMenuItem(name));
value = v;
desc = d;
}
/**
* Ask the user for new input for a variable and send it.
* Depending on whether there is a description given for the entry, show
* the description in the dialog or just show the name.
*/
@Override
public void performAction(SVWindow window, SVEventType eventType) {
if (desc != null) {
window.showInputDialog(desc, value, id, eventType);
} else {
window.showInputDialog(name, value, id, eventType);
}
}
/** Returns the actual value of the MenuListItem. */
@Override
public String getValue() {
return value;
}
}
| 2,085 | 33.196721 | 80 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVPopupMenu.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
import com.google.scrollview.events.SVEventType;
import com.google.scrollview.ui.SVMenuItem;
import com.google.scrollview.ui.SVWindow;
import java.awt.Component;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.util.HashMap;
import javax.swing.JMenu;
import javax.swing.JPopupMenu;
/**
* The SVPopupMenu class provides the functionality to add a popup menu to
* ScrollView. Each popup menu item gets associated with a (client-defined)
* command-id, which SVPopupMenu will return upon clicking it.
*
* @author [email protected]
*
*/
public class SVPopupMenu implements ActionListener {
/** The root entry to add items to. */
private JPopupMenu root;
/** Contains a map of item name to its actual entry. */
private HashMap<String, SVAbstractMenuItem> items;
/** The window the menubar belongs to. */
private SVWindow svWindow;
/**
* Create a new SVPopupMenu and associate it with a ScrollView window.
*
* @param sv The window our popup menu belongs to.
*/
SVPopupMenu(SVWindow sv) {
root = new JPopupMenu();
svWindow = sv;
items = new HashMap<String, SVAbstractMenuItem>();
}
/**
* Add a new entry to the menubar. For these items, the server will poll the
* client to ask what to do.
*
* @param parent The menu we add our new entry to (should have been defined
* before). If the parent is "", we will add the entry to the root
* (top-level)
* @param name The caption of the new entry.
* @param id The Id of the new entry. If it is -1, the entry will be treated
* as a menu.
*/
public void add(String parent, String name, int id) {
    // A duplicate entry - we just throw it away, since it's already in.
if (items.get(name) != null) { return; }
// A new submenu at the top-level
if (parent.equals("")) {
JMenu jli = new JMenu(name);
SVAbstractMenuItem mli = new SVSubMenuItem(name, jli);
items.put(name, mli);
root.add(jli);
}
// A new sub-submenu
else if (id == -1) {
SVAbstractMenuItem jmi = items.get(parent);
JMenu jli = new JMenu(name);
SVAbstractMenuItem mli = new SVSubMenuItem(name, jli);
items.put(name, mli);
jmi.add(jli);
}
// A new child entry. Add to appropriate parent.
else {
SVAbstractMenuItem jmi = items.get(parent);
if (jmi == null) {
System.out.println("ERROR: Unknown parent " + parent);
System.exit(1);
}
SVAbstractMenuItem mli = new SVEmptyMenuItem(id, name);
mli.mi.addActionListener(this);
items.put(name, mli);
jmi.add(mli);
}
}
/**
* Add a new entry to the menubar. In this case, we also know its value and
* possibly even have a description. For these items, the server will not poll
* the client to ask what to do, but just show an input dialog and send a
* message with the new value.
*
* @param parent The menu we add our new entry to (should have been defined
* before). If the parent is "", we will add the entry to the root
* (top-level)
* @param name The caption of the new entry.
* @param id The Id of the new entry. If it is -1, the entry will be treated
* as a menu.
* @param value The value of the new entry.
* @param desc The description of the new entry.
*/
public void add(String parent, String name, int id, String value, String desc) {
SVAbstractMenuItem jmi = items.get(parent);
SVMenuItem mli = new SVMenuItem(id, name, value, desc);
mli.mi.addActionListener(this);
items.put(name, mli);
if (jmi == null) { // add to root
root.add(mli.mi);
} else { // add to parent
jmi.add(mli);
}
}
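  /**
   * Usage sketch (names, id, value and description are illustrative
   * assumptions): a submenu plus an entry with a known value, so clicking it
   * opens an input dialog instead of polling the client.
   */
  private void popupDemo() {
    add("", "Variables", -1);
    add("Variables", "noise_factor", 42, "0.5", "Noise factor used by the client");
  }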
/**
* A click on one of the items in our menubar has occurred. Forward it
* to the item itself to let it decide what happens.
*/
public void actionPerformed(ActionEvent e) {
// Get the corresponding menuitem
SVAbstractMenuItem svm = items.get(e.getActionCommand());
svm.performAction(svWindow, SVEventType.SVET_POPUP);
}
/**
* Gets called by the SVEventHandler of the window to actually show the
* content of the popup menu.
*/
  public void show(Component invoker, int x, int y) {
    root.show(invoker, x, y);
}
}
| 4,926 | 32.97931 | 82 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVSubMenuItem.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
/**
* A MenuListItem is any sort of menu entry. This can either be within a popup
* menu or within a menubar. It can either be a submenu (only name and
* command-id) or a name with an associated value and possibly description. They
* can also have new entries added (if they are submenus).
*
* @author [email protected]
*/
import javax.swing.JMenu;
/** Constructs a new submenu which can hold other entries. */
class SVSubMenuItem extends SVAbstractMenuItem {
public SVSubMenuItem(String name, JMenu jli) {
super(-1, name, jli);
}
/** Adds a child entry to the submenu. */
@Override
public void add(SVAbstractMenuItem mli) {
mi.add(mli.mi);
}
/** Adds a child menu to the submenu (or root node). */
@Override
public void add(JMenu jli) {
mi.add(jli);
}
}
| 1,424 | 34.625 | 80 | java |
null | tesseract-main/java/com/google/scrollview/ui/SVWindow.java | // Copyright 2007 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); You may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package com.google.scrollview.ui;
import com.google.scrollview.ScrollView;
import com.google.scrollview.events.SVEvent;
import com.google.scrollview.events.SVEventHandler;
import com.google.scrollview.events.SVEventType;
import com.google.scrollview.ui.SVMenuBar;
import com.google.scrollview.ui.SVPopupMenu;
import org.piccolo2d.PCamera;
import org.piccolo2d.PCanvas;
import org.piccolo2d.PLayer;
import org.piccolo2d.extras.swing.PScrollPane;
import org.piccolo2d.nodes.PImage;
import org.piccolo2d.nodes.PPath;
import org.piccolo2d.nodes.PText;
import org.piccolo2d.util.PPaintContext;
import java.awt.BasicStroke;
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.Font;
import java.awt.GraphicsEnvironment;
import java.awt.Rectangle;
import java.awt.TextArea;
import java.awt.geom.IllegalPathStateException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.swing.JFrame;
import javax.swing.JOptionPane;
import javax.swing.SwingUtilities;
import javax.swing.WindowConstants;
/**
* The SVWindow is the top-level ui class. It should get instantiated whenever
* the user intends to create a new window. It contains helper functions to draw
* on the canvas, add new menu items, show modal dialogs etc.
*
* @author [email protected]
*/
public class SVWindow extends JFrame {
/**
* Constants defining the maximum initial size of the window.
*/
private static final int MAX_WINDOW_X = 1000;
private static final int MAX_WINDOW_Y = 800;
  /* Constant defining the (approx) height of the default message box */
private static final int DEF_MESSAGEBOX_HEIGHT = 200;
/** Constant defining the "speed" at which to zoom in and out. */
public static final double SCALING_FACTOR = 2;
/** The top level layer we add our PNodes to (root node). */
PLayer layer;
/** The current color of the pen. It is used to draw edges, text, etc. */
Color currentPenColor;
/**
* The current color of the brush. It is used to draw the interior of
* primitives.
*/
Color currentBrushColor;
/** The system name of the current font we are using (e.g.
* "Times New Roman"). */
Font currentFont;
/** The stroke width to be used. */
// This really needs to be a fixed width stroke as the basic stroke is
// anti-aliased and gets too faint, but the piccolo fixed width stroke
// is too buggy and generates missing initial moveto in path definition
// errors with a IllegalPathStateException that cannot be caught because
// it is in the automatic repaint function. If we can fix the exceptions
// in piccolo, then we can use the following instead of BasicStroke:
// import edu.umd.cs.piccolox.util.PFixedWidthStroke;
// PFixedWidthStroke stroke = new PFixedWidthStroke(0.5f);
// Instead we use the BasicStroke and turn off anti-aliasing.
BasicStroke stroke = new BasicStroke(0.5f);
/**
* A unique representation for the window, also known by the client. It is
   * used when sending messages from server to client to identify it.
*/
public int hash;
/**
* The total number of created Windows. If this ever reaches 0 (apart from the
* beginning), quit the server.
*/
public static int nrWindows = 0;
/**
* The Canvas, MessageBox, EventHandler, Menubar and Popupmenu associated with
* this window.
*/
private SVEventHandler svEventHandler = null;
private SVMenuBar svMenuBar = null;
private TextArea ta = null;
public SVPopupMenu svPuMenu = null;
public PCanvas canvas;
private int winSizeX;
private int winSizeY;
/** Set the brush to an RGB color */
public void brush(int red, int green, int blue) {
brush(red, green, blue, 255);
}
/** Set the brush to an RGBA color */
public void brush(int red, int green, int blue, int alpha) {
// If alpha is zero, use a null brush to save rendering time.
if (alpha == 0) {
currentBrushColor = null;
} else {
currentBrushColor = new Color(red, green, blue, alpha);
}
}
/** Erase all content from the window, but do not destroy it. */
public void clear() {
    // Manipulation of Piccolo's scene graph should be done from Swing's
// event dispatch thread since Piccolo is not thread safe. This code calls
// removeAllChildren() from that thread and releases the latch.
final java.util.concurrent.CountDownLatch latch = new java.util.concurrent.CountDownLatch(1);
SwingUtilities.invokeLater(new Runnable() {
public void run() {
layer.removeAllChildren();
repaint();
latch.countDown();
}
});
try {
latch.await();
} catch (InterruptedException e) {
}
}
/**
* Start setting up a new polyline. The server will now expect
* polyline data until the polyline is complete.
*
* @param length number of coordinate pairs
*/
public void createPolyline(int length) {
ScrollView.polylineXCoords = new float[length];
ScrollView.polylineYCoords = new float[length];
ScrollView.polylineSize = length;
ScrollView.polylineScanned = 0;
}
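  /**
   * Protocol sketch (coordinates are illustrative): the client first
   * announces the length, the coordinate arrays are then filled from the
   * message stream, and finally drawPolyline() renders the result.
   */
  private void polylineDemo() {
    createPolyline(3);
    ScrollView.polylineXCoords = new float[] {0f, 10f, 20f};
    ScrollView.polylineYCoords = new float[] {0f, 15f, 5f};
    ScrollView.polylineScanned = 3;
    drawPolyline();
  }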
/**
* Draw the now complete polyline.
*/
public void drawPolyline() {
int numCoords = ScrollView.polylineXCoords.length;
if (numCoords < 2) {
return;
}
PPath pn = PPath.createLine(ScrollView.polylineXCoords[0],
ScrollView.polylineYCoords[0],
ScrollView.polylineXCoords[1],
ScrollView.polylineYCoords[1]);
pn.reset();
pn.moveTo(ScrollView.polylineXCoords[0], ScrollView.polylineYCoords[0]);
for (int p = 1; p < numCoords; ++p) {
pn.lineTo(ScrollView.polylineXCoords[p], ScrollView.polylineYCoords[p]);
}
pn.closePath();
ScrollView.polylineSize = 0;
pn.setStrokePaint(currentPenColor);
pn.setPaint(null); // Don't fill the polygon - this is just a polyline.
pn.setStroke(stroke);
layer.addChild(pn);
}
/**
* Construct a new SVWindow and set it visible.
*
* @param name Title of the window.
* @param hash Unique internal representation. This has to be the same as
* defined by the client, as they use this to refer to the windows.
* @param posX X position of where to draw the window (upper left).
* @param posY Y position of where to draw the window (upper left).
* @param sizeX The width of the window.
* @param sizeY The height of the window.
* @param canvasSizeX The canvas width of the window.
* @param canvasSizeY The canvas height of the window.
*/
public SVWindow(String name, int hash, int posX, int posY, int sizeX,
int sizeY, int canvasSizeX, int canvasSizeY) {
super(name);
// Provide defaults for sizes.
if (sizeX <= 0) sizeX = canvasSizeX;
if (sizeY <= 0) sizeY = canvasSizeY;
if (canvasSizeX <= 0) canvasSizeX = sizeX;
if (canvasSizeY <= 0) canvasSizeY = sizeY;
// Avoid later division by zero.
if (sizeX <= 0) {
sizeX = 1;
canvasSizeX = sizeX;
}
if (sizeY <= 0) {
sizeY = 1;
canvasSizeY = sizeY;
}
// Initialize variables
nrWindows++;
this.hash = hash;
this.svEventHandler = new SVEventHandler(this);
this.currentPenColor = Color.BLACK;
this.currentBrushColor = Color.BLACK;
this.currentFont = new Font("Times New Roman", Font.PLAIN, 12);
// Determine the initial size and zoom factor of the window.
// If the window is too big, rescale it and zoom out.
int shrinkfactor = 1;
if (sizeX > MAX_WINDOW_X) {
shrinkfactor = (sizeX + MAX_WINDOW_X - 1) / MAX_WINDOW_X;
}
if (sizeY / shrinkfactor > MAX_WINDOW_Y) {
shrinkfactor = (sizeY + MAX_WINDOW_Y - 1) / MAX_WINDOW_Y;
}
winSizeX = sizeX / shrinkfactor;
winSizeY = sizeY / shrinkfactor;
double initialScalingfactor = 1.0 / shrinkfactor;
if (winSizeX > canvasSizeX || winSizeY > canvasSizeY) {
initialScalingfactor = Math.min(1.0 * winSizeX / canvasSizeX,
1.0 * winSizeY / canvasSizeY);
}
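    // Worked example (numbers are illustrative, assuming the height fits):
    // for sizeX=2500 and MAX_WINDOW_X=1000 the shrink factor becomes 3, so
    // the window opens 833 pixels wide and the view starts at scale 1/3.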
// Setup the actual window (its size, camera, title, etc.)
if (canvas == null) {
canvas = new PCanvas();
getContentPane().add(canvas, BorderLayout.CENTER);
}
layer = canvas.getLayer();
canvas.setBackground(Color.BLACK);
// Disable antialiasing to make the lines more visible.
canvas.setDefaultRenderQuality(PPaintContext.LOW_QUALITY_RENDERING);
setLayout(new BorderLayout());
setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
validate();
canvas.requestFocus();
    // Manipulation of Piccolo's scene graph should be done from Swing's
    // event dispatch thread since Piccolo is not thread safe. This code
    // schedules the initial repaint() on that thread once the frame is set
    // up, so it is safe to start working with Piccolo afterwards.
SwingUtilities.invokeLater(new Runnable() {
public void run() {
repaint();
}
});
setSize(winSizeX, winSizeY);
setLocation(posX, posY);
setTitle(name);
// Add a Scrollpane to be able to scroll within the canvas
PScrollPane scrollPane = new PScrollPane(canvas);
getContentPane().add(scrollPane);
scrollPane.setWheelScrollingEnabled(false);
PCamera lc = canvas.getCamera();
lc.scaleViewAboutPoint(initialScalingfactor, 0, 0);
// Disable the default event handlers and add our own.
addWindowListener(svEventHandler);
canvas.removeInputEventListener(canvas.getPanEventHandler());
canvas.removeInputEventListener(canvas.getZoomEventHandler());
canvas.addInputEventListener(svEventHandler);
canvas.addKeyListener(svEventHandler);
// Make the window visible.
validate();
setVisible(true);
}
/**
* Convenience function to add a message box to the window which can be used
* to output debug information.
*/
public void addMessageBox() {
if (ta == null) {
ta = new TextArea();
ta.setEditable(false);
getContentPane().add(ta, BorderLayout.SOUTH);
}
// We need to make the window bigger to accommodate the message box.
winSizeY += DEF_MESSAGEBOX_HEIGHT;
setSize(winSizeX, winSizeY);
}
/**
   * Allows you to specify the thickness with which to draw lines, rectangles
   * and ellipses.
* @param width The new thickness.
*/
public void setStrokeWidth(float width) {
// If this worked we wouldn't need the antialiased rendering off.
// stroke = new PFixedWidthStroke(width);
stroke = new BasicStroke(width);
}
/**
* Draw an ellipse at (x,y) with given width and height, using the
* current stroke, the current brush color to fill it and the
* current pen color for the outline.
*/
public void drawEllipse(int x, int y, int width, int height) {
PPath pn = PPath.createEllipse(x, y, width, height);
pn.setStrokePaint(currentPenColor);
pn.setStroke(stroke);
pn.setPaint(currentBrushColor);
layer.addChild(pn);
}
/**
* Draw the image with the given name at (x,y). Any image loaded stays in
* memory, so if you intend to redraw an image, you do not have to use
* createImage again.
*/
public void drawImage(PImage img, int xPos, int yPos) {
img.setX(xPos);
img.setY(yPos);
layer.addChild(img);
}
/**
* Draw a line from (x1,y1) to (x2,y2) using the current pen color and stroke.
*/
public void drawLine(int x1, int y1, int x2, int y2) {
PPath pn = PPath.createLine(x1, y1, x2, y2);
pn.setStrokePaint(currentPenColor);
pn.setPaint(null); // Null paint may render faster than the default.
pn.setStroke(stroke);
pn.moveTo(x1, y1);
pn.lineTo(x2, y2);
layer.addChild(pn);
}
/**
* Draw a rectangle given the two points (x1,y1) and (x2,y2) using the current
* stroke, pen color for the border and the brush to fill the
* interior.
*/
public void drawRectangle(int x1, int y1, int x2, int y2) {
if (x1 > x2) {
int t = x1;
x1 = x2;
x2 = t;
}
if (y1 > y2) {
int t = y1;
y1 = y2;
y2 = t;
}
PPath pn = PPath.createRectangle(x1, y1, x2 - x1, y2 - y1);
pn.setStrokePaint(currentPenColor);
pn.setStroke(stroke);
pn.setPaint(currentBrushColor);
layer.addChild(pn);
}
/**
   * Draw some text at (x,y) using the current pen color and text attributes. If
   * the current font cannot display at least one character, we try to find
   * a font which is capable of displaying it and use that to render the text.
   * Note: If the font says it can render a glyph, but in reality it turns out
   * to be unreadable, there is nothing we can do about it.
*/
public void drawText(int x, int y, String text) {
int unreadableCharAt = -1;
char[] chars = text.toCharArray();
PText pt = new PText(text);
pt.setTextPaint(currentPenColor);
pt.setFont(currentFont);
// Check to see if every character can be displayed by the current font.
for (int i = 0; i < chars.length; i++) {
if (!currentFont.canDisplay(chars[i])) {
// Set to the first not displayable character.
unreadableCharAt = i;
break;
}
}
// Have to find some working font and use it for this text entry.
if (unreadableCharAt != -1) {
Font[] allfonts =
GraphicsEnvironment.getLocalGraphicsEnvironment().getAllFonts();
for (int j = 0; j < allfonts.length; j++) {
if (allfonts[j].canDisplay(chars[unreadableCharAt])) {
Font tempFont =
new Font(allfonts[j].getFontName(), currentFont.getStyle(),
currentFont.getSize());
pt.setFont(tempFont);
break;
}
}
}
pt.setX(x);
pt.setY(y);
layer.addChild(pt);
}
/** Set the pen color to an RGB value */
public void pen(int red, int green, int blue) {
pen(red, green, blue, 255);
}
/** Set the pen color to an RGBA value */
public void pen(int red, int green, int blue, int alpha) {
currentPenColor = new Color(red, green, blue, alpha);
}
/**
   * Define how to display text. Note: underlined is currently not supported.
*/
public void textAttributes(String font, int pixelSize, boolean bold,
boolean italic, boolean underlined) {
// For legacy reasons convert "Times" to "Times New Roman"
if (font.equals("Times")) {
font = "Times New Roman";
}
int style = Font.PLAIN;
if (bold) {
style += Font.BOLD;
}
if (italic) {
style += Font.ITALIC;
}
currentFont = new Font(font, style, pixelSize);
}
/**
* Zoom the window to the rectangle given the two points (x1,y1)
* and (x2,y2), which must be greater than (x1,y1).
*/
public void zoomRectangle(int x1, int y1, int x2, int y2) {
if (x2 > x1 && y2 > y1) {
winSizeX = getWidth();
winSizeY = getHeight();
int width = x2 - x1;
int height = y2 - y1;
// Since piccolo doesn't do this well either, pad with a margin
// all the way around.
int wmargin = width / 2;
int hmargin = height / 2;
double scalefactor = Math.min(winSizeX / (2.0 * wmargin + width),
winSizeY / (2.0 * hmargin + height));
PCamera lc = canvas.getCamera();
lc.scaleView(scalefactor / lc.getViewScale());
      lc.animateViewToPanToBounds(new Rectangle(x1 - wmargin, y1 - hmargin,
2 * wmargin + width,
2 * hmargin + height), 0);
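      // Worked example (numbers are illustrative): zooming to (0,0)-(100,50)
      // in an 800x600 window pads 50 px left/right and 25 px top/bottom, so
      // the view scale becomes min(800/200, 600/100) = 4.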
}
}
/**
* Flush buffers and update display.
*
* Only actually reacts if there are no more messages in the stack, to prevent
* the canvas from flickering.
*/
public void update() {
// TODO(rays) fix bugs in piccolo or use something else.
// The repaint function generates many
// exceptions for no good reason. We catch and ignore as many as we
// can here, but most of them are generated by the system repaints
// caused by resizing/exposing parts of the window etc, and they
// generate unwanted stack traces that have to be piped to /dev/null
// (on linux).
try {
repaint();
} catch (NullPointerException e) {
// Do nothing so the output isn't full of stack traces.
} catch (IllegalPathStateException e) {
// Do nothing so the output isn't full of stack traces.
}
}
/** Adds a checkbox entry to the menubar, c.f. SVMenubar.add(...) */
public void addMenuBarItem(String parent, String name, int id,
boolean checked) {
svMenuBar.add(parent, name, id, checked);
}
/** Adds a submenu to the menubar, c.f. SVMenubar.add(...) */
public void addMenuBarItem(String parent, String name) {
addMenuBarItem(parent, name, -1);
}
/** Adds a new entry to the menubar, c.f. SVMenubar.add(...) */
public void addMenuBarItem(String parent, String name, int id) {
if (svMenuBar == null) {
svMenuBar = new SVMenuBar(this);
}
svMenuBar.add(parent, name, id);
}
/** Add a message to the message box. */
public void addMessage(String message) {
if (ta != null) {
ta.append(message + "\n");
} else {
System.out.println(message + "\n");
}
}
/**
* This method converts a string which might contain hexadecimal values to a
* string which contains the respective unicode counterparts.
*
   * For example, Hall0x00F6chen returns Hall<o umlaut>chen
* encoded as utf8.
*
* @param input The original string, containing 0x values
* @return The converted string which has the replaced unicode symbols
*/
private static String convertIntegerStringToUnicodeString(String input) {
StringBuffer sb = new StringBuffer(input);
Pattern numbers = Pattern.compile("0x[0-9a-fA-F]{4}");
Matcher matcher = numbers.matcher(sb);
while (matcher.find()) {
// Find the next match which resembles a hexadecimal value and convert it
// to
// its char value
char a = (char) (Integer.decode(matcher.group()).intValue());
// Replace the original with the new character
sb.replace(matcher.start(), matcher.end(), String.valueOf(a));
// Start again, since our positions have switched
matcher.reset();
}
return sb.toString();
}
/**
   * Show a modal input dialog. The answer from the dialog is then sent to the
   * client, together with the associated menu id, as SVET_POPUP.
*
* @param msg The text that is displayed in the dialog.
* @param def The default value of the dialog.
* @param id The associated commandId
* @param evtype The event this is associated with (usually SVET_MENU
* or SVET_POPUP)
*/
public void showInputDialog(String msg, String def, int id,
SVEventType evtype) {
svEventHandler.timer.stop();
String tmp =
(String) JOptionPane.showInputDialog(this, msg, "",
JOptionPane.QUESTION_MESSAGE, null, null, def);
if (tmp != null) {
tmp = convertIntegerStringToUnicodeString(tmp);
SVEvent res = new SVEvent(evtype, this, id, tmp);
ScrollView.addMessage(res);
}
svEventHandler.timer.restart();
}
/**
* Shows a modal input dialog to the user. The return value is automatically
* sent to the client as SVET_INPUT event (with command id -1).
*
* @param msg The text of the dialog.
*/
public void showInputDialog(String msg) {
showInputDialog(msg, null, -1, SVEventType.SVET_INPUT);
}
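  /**
   * Usage sketch (message text and command id are illustrative assumptions):
   * how the two dialog variants above are typically invoked.
   */
  private void inputDialogDemo() {
    showInputDialog("Enter a value:"); // answer is sent as SVET_INPUT, id -1
    showInputDialog("New threshold", "0.5", 7, SVEventType.SVET_POPUP);
  }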
/**
* Shows a dialog presenting "Yes" and "No" as answers and returns either a
* "y" or "n" to the client.
*
* Closing the dialog without answering is handled like "No".
*
* @param msg The text that is displayed in the dialog.
*/
public void showYesNoDialog(String msg) {
// res returns 0 on yes, 1 on no. Seems to be a bit counterintuitive
int res =
JOptionPane.showOptionDialog(this, msg, "", JOptionPane.YES_NO_OPTION,
JOptionPane.QUESTION_MESSAGE, null, null, null);
SVEvent e = new SVEvent(SVEventType.SVET_INPUT, this, 0, 0, 0, 0,
res == 0 ? "y" : "n");
ScrollView.addMessage(e);
}
/** Adds a submenu to the popup menu, c.f. SVPopupMenu.add(...) */
public void addPopupMenuItem(String parent, String name) {
if (svPuMenu == null) {
svPuMenu = new SVPopupMenu(this);
}
svPuMenu.add(parent, name, -1);
}
/** Adds a new menu entry to the popup menu, c.f. SVPopupMenu.add(...) */
public void addPopupMenuItem(String parent, String name, int cmdEvent,
String value, String desc) {
if (svPuMenu == null) {
svPuMenu = new SVPopupMenu(this);
}
svPuMenu.add(parent, name, cmdEvent, value, desc);
}
/** Destroys a window. */
public void destroy() {
ScrollView.addMessage(new SVEvent(SVEventType.SVET_DESTROY, this, 0,
"SVET_DESTROY"));
setVisible(false);
// dispose();
}
}
| 21,612 | 32.302003 | 97 | java |
featConstr | featConstr-master/src/featconstr/ClassifierAcc.java | package featconstr;
import weka.classifiers.Classifier;
/**
*
* @author bostjan
*/
public class ClassifierAcc {
Classifier c;
double acc;
public ClassifierAcc(Classifier c, double acc) {
this.c = c;
this.acc = acc;
}
}
| 260 | 13.5 | 52 | java |
featConstr | featConstr-master/src/featconstr/DMatrixLoader.java | package featconstr;
import ml.dmlc.xgboost4j.java.DMatrix;
import ml.dmlc.xgboost4j.java.XGBoostError;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
public class DMatrixLoader {
public static DMatrix instancesToDMatrix(Instances instances) throws XGBoostError{
long[] rowHeaders = new long[instances.size()+1];
rowHeaders[0]=0;
List<Float> dataList = new ArrayList<>();
List<Integer> colList = new ArrayList<>();
float[] labels = new float[instances.size()];
for(int i=0; i<instances.size(); i++) {
Instance instance = instances.get(i);
rowHeaders[i] = dataList.size();
processInstance(instance, dataList, colList);
labels[i] = (float) instance.classValue();
}
rowHeaders[rowHeaders.length - 1] = dataList.size();
int colNum = instances.numAttributes()-1;
DMatrix dMatrix = createDMatrix(rowHeaders, dataList, colList, colNum);
dMatrix.setLabel(labels);
return dMatrix;
}
public static DMatrix instanceToDMatrix(Instance instance) throws XGBoostError {
List<Float> dataList = new ArrayList<>();
List<Integer> colList = new ArrayList<>();
processInstance(instance, dataList, colList);
long[] rowHeaders = new long[]{0, dataList.size()};
int colNum = instance.numAttributes()-1;
return createDMatrix(rowHeaders, dataList, colList, colNum);
}
protected static DMatrix createDMatrix(long[] rowHeaders, List<Float> dataList, List<Integer> colList, int colNum) throws XGBoostError {
float[] data = new float[dataList.size()];
int[] colIndices = new int[dataList.size()];
colIndices[0] = 0;
for(int i=0; i<dataList.size(); i++) {
data[i] = dataList.get(i);
colIndices[i] = colList.get(i);
}
return new DMatrix(rowHeaders, colIndices, data, DMatrix.SparseType.CSR, colNum);
}
protected static void processInstance(Instance instance, List<Float> dataList, List<Integer> colList ){
Attribute classAttribute = instance.classAttribute();
int classAttrIndex = classAttribute.index();
Enumeration<Attribute> attributeEnumeration = instance.enumerateAttributes();
while (attributeEnumeration.hasMoreElements()){
Attribute attribute = attributeEnumeration.nextElement();
int attrIndex = attribute.index();
if(attrIndex == classAttrIndex)
continue;
double value = instance.value(attribute);
if (value == 0)
continue;
dataList.add((float) value);
if (attrIndex < classAttrIndex)
colList.add(attrIndex);
else
colList.add(attrIndex+1);
}
}
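    // Worked CSR example (illustrative data): two instances over three
    // attributes plus a trailing class attribute, with non-zero values
    // (a0=1.5, a2=2.0) and (a1=3.0), produce
    //   rowHeaders = {0, 2, 3}, data = {1.5f, 2.0f, 3.0f}, colIndices = {0, 2, 1}
    // which is exactly the CSR triple handed to the DMatrix constructor above.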
}
| 2,975 | 34.855422 | 140 | java |
featConstr | featConstr-master/src/featconstr/FeatConstr.java | package featconstr;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.PrintWriter;
import weka.core.Instances;
import java.text.DecimalFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import weka.core.*;
import weka.classifiers.*;
import weka.filters.unsupervised.attribute.Remove;
import weka.filters.Filter;
import weka.classifiers.trees.RandomForest;
import weka.classifiers.bayes.NaiveBayes;
import java.util.Map;
import java.util.Random;
import weka.filters.unsupervised.attribute.Add;
import java.util.Collections;
import java.util.HashSet;
import org.apache.commons.lang3.ArrayUtils; //commons-lang3-3.11.jar
import weka.classifiers.trees.J48;
import java.util.stream.Collectors;
import weka.attributeSelection.AttributeSelection;
import weka.attributeSelection.Ranker;
import weka.attributeSelection.ReliefFAttributeEval;
import java.awt.Image;
import java.awt.image.RenderedImage;
import java.io.OutputStream;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Enumeration;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.Set;
import java.util.TreeMap;
import javax.imageio.ImageIO;
import org.ghost4j.converter.PDFConverter; //ghost4j-1.0.1.jar ... for converting svg to pdf
import org.ghost4j.document.PDFDocument;
import org.ghost4j.document.PSDocument;
import org.ghost4j.document.PaperSize;
import org.ghost4j.renderer.SimpleRenderer;
import org.paukov.combinatorics3.Generator;
import com.github.rcaller.rstuff.RCaller; //RCaller-3.0.2.jar ... for calling MDL for discrete and continuous features
import com.github.rcaller.rstuff.RCode;
import java.net.URL;
import weka.attributeSelection.GainRatioAttributeEval;
import weka.attributeSelection.InfoGainAttributeEval;
import weka.classifiers.functions.MultilayerPerceptron;
import weka.classifiers.rules.Rule;
import weka.core.converters.ConverterUtils.DataSink;
import weka.classifiers.rules.FURIA; //fuzzyUnorderedRuleInduction.jar
import weka.filters.supervised.instance.StratifiedRemoveFolds;
import weka.filters.unsupervised.attribute.NominalToBinary;
import weka.filters.unsupervised.instance.RemoveRange;
import weka.filters.unsupervised.instance.RemoveWithValues;
import ml.dmlc.xgboost4j.java.DMatrix;
import ml.dmlc.xgboost4j.java.XGBoost;
import ml.dmlc.xgboost4j.java.XGBoostError;
import ml.dmlc.xgboost4j.java.Booster;
import java.util.HashMap;
import java.util.Vector;
import java.util.logging.Level;
import java.util.logging.Logger;
import weka.classifiers.functions.SMO;
import weka.classifiers.functions.supportVector.PolyKernel;
import weka.classifiers.functions.supportVector.RBFKernel;
import weka.classifiers.lazy.IBk;
import weka.filters.unsupervised.attribute.ReplaceMissingValues;
/**
*
* @author Boštjan Vouk
*/
@SuppressWarnings({"rawtypes", "unchecked", "serial"})
public class FeatConstr {
/**************************** main EFC parameters *********************************/
public static boolean justExplain=false; //just explain datasets, construct features and evaluate them
public static boolean visualisation=false; //visualisation of explanations using IME method
public static boolean exhaustive=false; //try exhaustive search ... all combinations between attributes
public static boolean jakulin=false; //try exhaustive search, calculate interaction information between all comb. of attributes; Jakulin, A. (2005). Machine learning based on attribute interactions [Doctoral dissertation, University of Ljubljana]. ePrints.FRI. https://bit.ly/3eiJ18x
/*****************************************************************************/
public static boolean groupsByThrStat=true; //print statistics about groups (identified by EFC) by thresholds
public static boolean writeAccByFoldsInFile=true; //for analysing results of statistical tests
public static boolean saveConstructs=true; //save generated features with attributes into new dataset ("dataset name"-origPlusRen1stLFeat-"time-date".arff)
public static boolean renameGenFeat=true; //rename generated features (e.g., F1, F2 ...), available only if saveConstructs=true, for potential generation of 2nd level features; input dataset for generating 2nd level feat must contain "origPlusRen1stLFeat" string
/**************************** explanation parameters *********************************/
    public static boolean treeSHAP=true; //if false then set predictionModel for explanations with IME method (default is RF; set a different model on line 436) (default true)
public static boolean explAllData=false;
public static boolean explAllClasses=false;
/**************************** IME parameters *********************************/
public enum IMEver{equalSampling, adaptiveSamplingSS, adaptiveSamplingAE, aproxErrSampling};
//equalSampling - each attribute has same num. of samples, Algorithm 1 in in Štrumbelj, Erik, and Igor Kononenko. "An efficient explanation of individual classifications using game theory." The Journal of Machine Learning Research 11 (2010): 1-18.
//adaptiveSampling - Algorithm 2 in Štrumbelj, Erik, and Igor Kononenko. "Explaining prediction models and individual predictions with feature contributions." Knowledge and information systems 41.3 (2014): 647-665.
    //adaptiveSamplingSS - stopping criterion is the sum of samples
    //adaptiveSamplingAE - stopping criterion is the approximation error for all attributes
//aproxErrSampling - we calculate samples for each attribute mi=(<1-alpha, e>) (article 2010) in Štrumbelj, Erik, and Igor Kononenko. "An efficient explanation of individual classifications using game theory." The Journal of Machine Learning Research 11 (2010): 1-18.
public static IMEver method=IMEver.equalSampling; //selected IME method
    public static int N_SAMPLES=1000; //if we use equalSampling ... number of samples; we choose a random value from the interval min-max N_SAMPLES times
public static int minS=10; //min samples ... if we use sumOfSamples and diffSampling ... to obtain an approximate estimate of the variance
public static int sumOfSmp=2000; //sum of samples ... if we use adaptive sampling ... sumOfSmp >= n*minS ... n is number of attributes
public static int pctErr=95; //90, 95 or 99;
public static double error=0.01;
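    //Illustrative budget check (example values, not defaults): with n=100 attributes and minS=10,
    //adaptive sampling needs sumOfSmp >= n*minS = 1000, so the default sumOfSmp=2000 leaves 1000
    //extra samples to distribute among the attributes with the highest variance.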
/**************************** XGBoost parameters *********************************/
public static int numOfRounds=100; //XGBoost parameter - number of decision trees
public static int maxDepth=3; //XGBoost parameter - size of decision trees
public static double eta=0.3; //XGBoost parameter - shrinkage
public static double gamma=1; //XGBoost parameter - gamma
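    //Sketch (not executed anywhere) of how the four fields above map onto the XGBoost4J training call;
    //trainMat stands for a DMatrix built from the training data, e.g. via DMatrixLoader.instancesToDMatrix:
    // Map<String,Object> params=new HashMap<>();
    // params.put("max_depth",maxDepth); params.put("eta",eta); params.put("gamma",gamma);
    // Booster booster=XGBoost.train(trainMat, params, numOfRounds, new HashMap<String,DMatrix>(), null, null);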
/**************************** visualisation parameters *********************************/
public static int visFrom=1, visTo=10; //visualize instances from visFrom to visTo
public static int drawLimit=20; //we draw (max.) 20 the most important attributes
public static int topHigh=10; //visualise features with highest contributions (instance explanation)
public static int numOfImpt=6; //visualise features with highest contributions ...
public static int RESOLUTION=100; //density for model visualisation
public static boolean pdfPng=true; //besided eps, print also pdf and png
    public static Classifier visualModel; //model for visualisation (set it on line 437)
/**************************** additional EFC parameters *********************************/
public static double attrImpThrs[]={0,0.25,0.5}; //evaluation thresholds, used only in feature selection setting
public static boolean evalFeatDuringFC=false; //enable feature evaluation during FC process
public static double featThr=0.1; //evaluation threshold (use of MDL), useful only when evalFeatDuringFC is enabled (true)
public static double thrL=0.1; //weight threshold - lower (default 0.4)
public static double thrU=0.8; //weight threshold - upper (default 0.8)
public static double step=0.1; //step for traversing all thresholds from thrL to thrU
public static double NOISE=1; //(default 1) "lower" groups that have less than noiseThr% groups are removed ... noiseThr=0 (we take all groups); noiseThr=(numInst*NOISE)/100.0;
public static int minNoise=3; //(default 3) minimum number of groups at noiseThr
public static int minMinNoise=1; //(default 1) if numInst<minExplInst then minNoise=minMinNoise
public static int minExplInst=50; //if numInst<minExplInst then minNoise=minMinNoise
public static int maxToExplain=500; //(default 500) max instances to explain if we have more than 500 instances to explain from the class
public static int instThr=10; //e.g. 10% ... we explain minority class, if minority class has at least percent of instThr instances
/**************************** evaluation parameters *********************************/
public static int folds=10; //for generating models, folds=1 means no CV and using split in ratio listed below
public static int splitTrain=4; //5 ... 80%:20%, 4 ... 75%:25%, 3 ... 66%:33%; used only when folds=1, meaning no CV and a single train/test split
public static int splitTrainFS=4; //5 ... 80%:20%, 4 ... 75%:25%, 3 ... 66%:33%; used when feature selection is performed on the validation dataset
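//e.g., folds=1 with splitTrain=4 trains on three of four stratified folds (75%) and tests
//on the remaining one (25%); folds=10 runs a standard 10-fold cross-validation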
/**************************** FURIA parameters *********************************/
public static double cf=0.9; //confidence factor (FURIA) (default: 0.5)
public static double pci=0.9; //percentage of covered instances (FURIA)
public static boolean covering=true; //covering=true -> stop FURIA feature construction once all instances are covered by the generated features
public static boolean featFromExplClass=true; //generate FURIA features only from the explained class; for the ablation study set featFromExplClass=false, which takes features from all classes
/**************************** types of features and construction depth *************/
//include/exclude specific types of features; at least one type must be selected
public static boolean logFeat=true; //generate logical operators features
public static boolean decRuleFeat=true; //generate decision rules features
public static boolean thrFeat=true; //generate threshold features
public static boolean relatFeat=false; //generate relational features
public static boolean cartFeat=false; //generate Cartesian product features
public static boolean numerFeat=false; //generate numerical features
//include/exclude specific features
public static String [] operationLogUse={"EQU","XOR","IMPL"}; //logical operators - for composing new features; full set is: "AND","OR","EQU","XOR","IMPL"
public static String [] operationRelUse={"LESSTHAN","DIFF"}; //relational operators - for composing new features; full set is: "LESSTHAN","DIFF"
public static String [] operationNumUse={"ADD","SUBTRACT","DIVIDE"}; //numeric operators - for composing new features; full set is:"ADD","SUBTRACT","DIVIDE","ABSDIFF"
public static int featDepth=2; //3 means 2 and 3, 4 means 2, 3, and 4 ... if featDepth is less than 3 then the construction depth is 2; higher depth is used only for conjunction and disjunction
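//e.g., with featDepth=3, besides pairwise features such as (A1 XOR A2), triples combined with
//conjunction/disjunction such as (A1 AND A2 AND A3) are also constructed (only AND/OR are
//extended beyond depth 2, see the construction loop in main)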
/*****************************************************************************/
public static int classToExplain=1; //default is second class but this value is changed due to heuristic - explain minority class if class has at least instThr pct instances
public static int timeLimit=10800000; //10800000ms = 3h
public static int numInst; //number of instances in explained class; is set when classToExplain is defined
public enum OperationLog{AND,OR,EQU,XOR,IMPL};
public enum OperationRel{LESSTHAN,DIFF};
public enum OperationNum{ADD,SUBTRACT,DIVIDE,ABSDIFF};
public static String datasetName;
public static String tmpDir;
public static List<String> listOfConcepts;
public static String fileName;
public static long modelBuildTime[];
public static double accOrigModelByFolds[][];
public static double accExplAlgInt[]; //for internal evaluation of the explanation alg
public static double accExplAlgTest[]; //for evaluation of the explanation alg
public static double oobRF[]; //for internal evaluation of the RF explanation alg - out of bag
public static double accuracyByFolds[][];
public static double accuracyByFoldsPS[][];
public static double accuracyByFoldsFuriaThr[][];
public static double accByFoldsLF[][]; //for measuring accuracy for method Logical features
public static double accByFoldsCP[][]; //for Cartesian product
public static double accByFoldsRE[][]; //for relational features
public static double accByFoldsNum[][]; //for numerical features
public static double featByFoldsPS[][][];
public static double numberOfFeatByFolds[][]; //0-logical, 1-threshold, 2-FURIA ... 5-numerical
public static double numOfFeatByFoldsLF[]; //number of logical features per folds (for Logical features method)
public static double numFeatByFoldsCP[]; //number of features from Cartesian product
public static double numFeatByFoldsRE[]; //number of relational features
public static double numFeatByFoldsNum[]; //number of numerical features
public static double numberOfTreeByFoldsPS[][]; //0-tree size, 1-num of leaves, 2-sum of terms
public static double numOfTreeByFoldsLF[][]; //0-tree size, 1-num of leaves, 2-sum of terms (for Logical features method)
public static double numOfTreeByFoldsCP[][]; //0-tree size, 1-num of leaves, 2-sum of terms (for Cartesian product)
public static double numOfTreeByFoldsRE[][]; //0-tree size, 1-num of leaves, 2-sum of terms (for relational feat)
public static double numOfTreeByFoldsNum[][]; //0-tree size, 1-num of leaves, 2-sum of terms (for numerical feat)
public static double numCartFeatInTreeFS[][]; //0-number of Cartesian features in tree, 1 sum of constructs
public static double numTreeByFoldsFuriaThr[][];
public static double numberOfTreeByFolds[][]; //0-tree size, 1-num of leaves, 2-sum of terms
public static double numberOfUnImpFeatByFolds[][]; //0 - or, 1 - equ, 2 - xor, 3 - impl, 4 - and, 5 - lessthan, 6 - relational, 7 - Cartesian
public static double numFeatByFoldsFuriaThr[][];
public static long exlpTime[], allFCTime[], allFCTimeLF[], numericalFCTime[], cartesianFCTime[], relationalFCTime[], furiaThrTime[];
public static long learnAllFCTime[][], learnAllFCTimeLF[][], learnAllFCTimeNum[][], learnAllFCTimeCP[][], learnAllFCTimeRE[][], learnFuriaThrTime[][];
public static double numOfExplainedInst[];
public static double numOfRulesByFolds[];
public static double numOfCartesian[]; //number of Cartesian features in tree - when we generate just Cartesian
public static double numOfRelational[]; //number of relational features in tree - when we generate just relational feat
public static double numOfNumerical[]; //number of numerical features in tree - when we generate just numerical feat
public static double numOfCartFAll[]; //number of Cartesian features in tree - when we generate all features
public static double numOfRelInTreeAll[]; //number of relational features in tree - when we generate all features
public static double numOfNumInTreeAll[]; //number of numerical features in tree - when we generate all features
public static double sumOfConstrCart[]; //sum of constructs (from Cartesian) in tree - when we generate just Cartesian
public static double sumOfConstrRel[]; //sum of constructs (from relational feat) in tree - when we generate just relational feat
public static double sumOfConstrNum[]; //sum of constructs (from numerical feat) in tree - when we generate just numerical feat
public static double sumOfConstrRelAll[]; //sum of constructs (from relational feat) in tree - when we generate all feat
public static double sumOfConstrCartAll[]; //sum of constructs (from Cartesian) in tree - when we generate all features
public static double numOfLogicalInTree[][]; //number of logical features in tree - when we generate just logical
public static double numOfLogInTreeAll[]; //number of logical features in tree - when we generate all features
public static double sumOfConstrLFAll[];
public static double sumOfConstrNumAll[]; //sum of constructs (from numerical) in tree - when we generate all features
public static double numLogFeatInTreeFS[][]; //number of logical features in trees (feature selection) and constructs
public static double numNumFeatInTreeFS[][]; //number of numerical features in trees (feature selection) and constructs
public static double numRelFeatInTreeFS[][]; //number of relational features in trees (feature selection) and constructs
public static double numOfRulesByFoldsLF[]; //for method Logical features
public static double numOfTermsByFoldsLF[];
public static double numOfRatioByFoldsLF[];
public static double numOfRulesByFoldsCP[]; //for Cartesian product
public static double numOfTermsByFoldsCP[];
public static double numOfRatioByFoldsCP[];
public static double numOfRulesByFoldsRE[]; //for relational feat
public static double numOfTermsByFoldsRE[];
public static double numOfRatioByFoldsRE[];
public static double numOfRulesByFoldsNum[]; //for numerical feat
public static double numOfTermsByFoldsNum[];
public static double numOfRatioByFoldsNum[];
public static double numOfTermsByFoldsF[]; //number of terms of constructs in FURIA features
public static double numOfRatioByFoldsF[];
public static double numOfFuriaThrInTreeByFolds[][]; //0-num of FURIA feat, 1-sum of terms of FURIA feat, 2-num of thr feat, 3-sum of terms in thr feat ... in All setting
public static double numOfFuriaThrInTreeByFoldsF[][]; //0-num of FURIA feat, 1-sum of terms of FURIA feat, 2-num of thr feat, 3-sum of terms in thr feat ... if DR or thr feat. are generated
public static double numOfFuriaThrInTreeByFoldsP[][]; //0-num of FURIA feat, 1-sum of terms of FURIA feat, 2-num of thr feat, 3-sum of terms in thr feat ... in PS/FS setting
public static long learnAllTime[][];
public static double treeSize[], numOfLeaves[], sumOfTerms[], ratioTermsNodes[], numOfRules[], numOfTerms[], numConstructsPerRule[];
public static long paramSearchTime[][], paramSLearnT[][];
public static double complexityOfFuria[][], complexityOfFuriaPS[][];
public static double maxGroupOfConstructs[];
public static double numOfGroupsOfFeatConstr[];
public static double avgTermsPerGroup[];
public static int avgTermsPerFold[];
public static Set unInfFeatures = new HashSet(); //for controlling informative features
public static int processors;
public static ArrayList<Double>[] dotsA;
public static ArrayList<Double>[] dotsB;
public static String nThHigh;
public static PrintWriter logFile, impGroups, impGroupsKD, discIntervalsKD, attrImpListMDL, attrImpListMDL_KD, attrImpListReliefF, attrImpListReliefF_KD, bestParamPerFold, discIntervals, accByFolds,groupsStat;
public static void main(String[] args) throws Exception {
URL myURL = new URL("https://github.com/bostjanv76/featConstr");
/**************************** check the correct setting of EFC *********************************/
if((justExplain==false && visualisation==false && exhaustive==false && jakulin==true)){
System.out.println("\u001B[31mYou must set the correct values of the parameters from the following list of settings and run the program again!\u001B[0m");
System.out.println("\t1) \u001B[34mEFC\033[0m \t\t\t\t\t(justExplain=false, visualisation=false, exhaustive=false, jakulin=false)");
System.out.println("\t2) \u001B[34mFC based on exhaustive search\033[0m \t(justExplain=false, visualisation=false, exhaustive=true, jakulin=false)");
System.out.println("\t3) \u001B[34mFC based on interaction information\033[0m \t(justExplain=false, visualisation=false, exhaustive=true, jakulin=true)");
System.out.println("\t4) \u001B[34mKnowledge discovery\033[0m \t\t\t(justExplain=true, visualisation=false)");
System.out.println("\t5) \u001B[34mVisualisation\033[0m \t\t\t(justExplain=false, visualisation=true)");
System.out.println("For more instructions, please see "+myURL);
System.exit(0);
}
if(logFeat==false && decRuleFeat==false && thrFeat==false && relatFeat==false && cartFeat==false && numerFeat==false && !jakulin && !exhaustive){
System.out.println("\u001B[31mAt least one type of features must be selected/true!\u001B[0m");
System.out.println("\u001B[34mTip: Set at least one of the following flags to true.\033[0m");
System.out.println("\t1) \u001B[34mlogFeat\033[0m");
System.out.println("\t2) \u001B[34mdecRuleFeat\033[0m");
System.out.println("\t3) \u001B[34mthrFeat\033[0m");
System.out.println("\t4) \u001B[34mrelatFeat\033[0m");
System.out.println("\t5) \u001B[34mcartFeat\033[0m");
System.out.println("\t6) \u001B[34mnumerFeat\033[0m");
System.out.println("For more instructions, please see "+myURL);
System.exit(0);
}
tmpDir = System.getProperty("java.io.tmpdir");
deleteXGBdll(); //if xgboost4j dll exists in temp folder, delete it
String folderName="logs/";
if((justExplain==false && visualisation==false && exhaustive==false && jakulin==false))
folderName="logs/efc/";
if((justExplain==false && visualisation==false && exhaustive==true && jakulin==false))
folderName="logs/exhaustive/";
if((justExplain==false && visualisation==false && exhaustive==true && jakulin==true))
folderName="logs/jakulin/";
String lg = new SimpleDateFormat("HH.mm.ss-dd.MM.yyyy").format(new Date());
if(!justExplain && !visualisation){
logFile= new PrintWriter(new FileWriter(folderName+"report-"+lg+".log"));
if(!exhaustive && !jakulin){
bestParamPerFold= new PrintWriter(new FileWriter(folderName+"params-"+lg+".dat"));
if(groupsByThrStat)
groupsStat = new PrintWriter(new FileWriter(folderName+"groupsStat-"+lg+".csv",true)); //number of identified groups by EFC for each threshold and each fold
}
}
if(justExplain){ //knowledge discovery mode (with or without visualisation)
impGroupsKD = new PrintWriter(new FileWriter(folderName+"kd/impGroups-"+lg+".log"));
attrImpListMDL_KD = new PrintWriter(new FileWriter(folderName+"kd/attrImpListMDL-"+lg+".dat"));
attrImpListReliefF_KD = new PrintWriter(new FileWriter(folderName+"kd/attrImpListReliefF-"+lg+".dat"));
discIntervalsKD = new PrintWriter(new FileWriter(folderName+"kd/discretizationIntervals-"+lg+".dat"));
}
else if(!visualisation && !justExplain){
impGroups = new PrintWriter(new FileWriter(folderName+"impGroups-"+lg+".log"));
if(!jakulin){
attrImpListMDL = new PrintWriter(new FileWriter(folderName+"attrImpListMDL-"+lg+".dat"));
attrImpListReliefF = new PrintWriter(new FileWriter(folderName+"attrImpListReliefF-"+lg+".dat"));
}
discIntervals = new PrintWriter(new FileWriter(folderName+"discretizationIntervals-"+lg+".dat"));
}
File folder;
Timer t1;
Timer tTotal=new Timer();
double [] classDistr;
RCaller rCaller = RCaller.create(); //open RCaller only once and close it at the end of the program
RCode code = RCode.create();
boolean isClassification=true;
//classification datasets
/*****demo datasets*****/
//folder = new File("datasets/demo");
/*****toy datasets*****/
folder = new File("datasets/toy");
/*****artificial datasets*****/
//folder = new File("datasets/artificial");
/*****UCI datasets*****/
//folder = new File("datasets/uci");
/*****real dataset - credit score*****/
//folder = new File("datasets/real");
File[] listOfFiles = folder.listFiles();
boolean noFiles=true;
for(File file : listOfFiles){
if(file.isFile()){
noFiles=false;
break;
}
}
if(noFiles) //check if analysed folder is empty
System.out.println("\u001B[31mPut dataset(s) in the selected folder and run the program again! The currently selected folder is "+folder.getName()+".\u001B[0m");
loopExplanationVisualisation:
for(File file : listOfFiles){
loopExhaustiveTooLong:
if(file.isFile()){
tTotal.start();
fileName=file.getName();
System.out.println("dataset: "+fileName);
if(justExplain){ //knowledge discovery mode (with or without visualisation)
impGroupsKD.println("dataset: "+fileName);
attrImpListMDL_KD.println("dataset: "+fileName);
attrImpListReliefF_KD.println("dataset: "+fileName);
discIntervalsKD.println("dataset: "+fileName);
}
else if(!visualisation && !justExplain){
logFile.println("dataset: "+fileName);
if(!exhaustive && !jakulin){
bestParamPerFold.println("dataset: "+fileName);
if(groupsByThrStat)
groupsStat.println("dataset: "+fileName);
}
impGroups.println("dataset: "+fileName);
if(exhaustive && !jakulin)
impGroups.println("Exhaustive search");
if(!jakulin){
attrImpListMDL.println("dataset: "+fileName);
attrImpListReliefF.println("dataset: "+fileName);
}
discIntervals.println("dataset: "+fileName);
}
Classifier clsTab[]=null;
processors = Runtime.getRuntime().availableProcessors();
//classification
NaiveBayes nb=new NaiveBayes();
J48 j48=new J48();
FURIA furia=new FURIA(); //in the WEKA API the ruleset is induced on the whole dataset; here we build a ruleset for every fold
MultilayerPerceptron mp=new MultilayerPerceptron();
//wildcard values: 'a' = (attribs + classes) / 2, 'i' = attribs, 'o' = classes , 't' = attribs + classes (default: 'a')
//hiddenLayers=(attribs + classes) / 2 ... one hidden layer with (attribs + classes) / 2 units (neurons)
//mp.setHiddenLayers("10,5"); //e.g., two hidden layers, one with 10, the other with 5 units (neurons)
SMO svmLin=new SMO(); //SVM with Linear kernel (default kernel)
SMO svmPoly=new SMO(); //SVM with Polynomial kernel
PolyKernel p=new PolyKernel();
p.setExponent(2); //set the degree of the polynomial
svmPoly.setKernel(p);
SMO svmRBF=new SMO(); //SVM with RBF kernel
RBFKernel rbfKernel = new RBFKernel();
svmRBF.setKernel(rbfKernel); //set RBF kernel
IBk knn=new IBk();
knn.setKNN(10);
RandomForest rf=new RandomForest();
rf.setNumExecutionSlots(processors); //The number of execution slots (threads) to use for constructing the ensemble.
rf.setCalcOutOfBag(true);
Instances data = new Instances(new BufferedReader(new FileReader(file)));
data.setClassIndex(data.numAttributes()-1);
System.gc();
//precaution: replace "==" in attribute names with "-IS-"
String oldName, newName;
for(int i=0;i<data.numAttributes();i++){
oldName=data.attribute(i).name();
if(oldName.toUpperCase().contains("==")){
newName=oldName.toUpperCase().replace("==", "-IS-");
data.renameAttribute(i, newName);
}
}
Classifier predictionModel=rf; //model for explanations when we use the IME method; list of included classifiers: nb, j48, furia, mp, svmLin, svmPoly, svmRBF, rf
visualModel=rf; //model for visualisations when we use the IME method; list of included classifiers: nb, j48, furia, mp, svmLin, svmPoly, svmRBF, rf
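//a minimal sketch: any of the classifiers instantiated above can be plugged in instead, e.g.
//  predictionModel=svmRBF; visualModel=j48;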
if(justExplain){
numberOfUnImpFeatByFolds=new double[8][folds];
ReplaceMissingValues rwm=new ReplaceMissingValues();
rwm.setInputFormat(data);
data=Filter.useFilter(data, rwm);
// attrImpListMDL_KD.println("MDL - before CI");
// mdlCORElearn(data, rCaller, code);
// attrImpListReliefF_KD.println("ReliefF - before CI");
// lowLevelReliefF(data);
Instances dataWithNewFeat=justExplainAndConstructFeat(data, predictionModel, true, rCaller, code); //knowledge discovery
if(saveConstructs){
Instances origNewfeat=null;
origNewfeat=new Instances(dataWithNewFeat);
String fName1, fName2, fName3, origName=fileName.substring(0, fileName.indexOf('.'));
folderName="logs/kd/";
String searchString1="origPlusRen", searchString2="LFeat";
int featLevel;
if(fileName.contains(searchString1) && fileName.contains(searchString2)){
int startIdx=fileName.indexOf(searchString1);
int endIdx=fileName.indexOf(searchString2)+searchString2.length();
String tmpString=fileName.substring(startIdx, endIdx);
featLevel=Integer.parseInt(tmpString.substring(searchString1.length(),searchString1.length()+1))+1; //increase feature level
fName1="origPlus"+featLevel+searchString2;
fName2="names-"+featLevel+"-level-feat";
fName3=searchString1+featLevel+searchString2;
origName=fileName.substring(0, fileName.indexOf("-"+searchString1));
}
else{
featLevel=1;
fName1 ="origPlus1LFeat";
fName2 ="names-1-level-feat";
fName3 ="origPlusRen1LFeat";
}
DataSink.write(folderName+origName+"-"+fName1+"-"+lg+".arff", origNewfeat);
if(renameGenFeat){
int iName=1;
String tmpAttrName;
int oldNumAttr=data.numAttributes()-1;
PrintWriter attNames= new PrintWriter(new FileWriter(folderName+fName2+"-"+lg+".dat"));
attNames.println("NEW \t OLD");
for(int i=oldNumAttr;i<dataWithNewFeat.numAttributes()-1;i++){
tmpAttrName="F"+iName+"L"+featLevel;
attNames.println(tmpAttrName+"\t"+dataWithNewFeat.attribute(i).name());
dataWithNewFeat.renameAttribute(i, tmpAttrName);
iName++;
}
attNames.close();
DataSink.write(folderName+origName+"-"+fName3+"-"+lg+".arff", dataWithNewFeat); //save the dataset with renamed features (F1L1, F2L1 ...) for potentially generating 2nd-level features
}
}
continue loopExplanationVisualisation;
}
if(!visualisation){
System.out.println("Number of folds for testing (CV): "+folds);
logFile.println("Number of folds for testing (CV): "+folds);
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
}
if(data.classAttribute().isNumeric())
isClassification=false;
if(isClassification)
clsTab=new Classifier[]{j48, nb, furia, rf}; //default {j48, nb, furia, rf}; additional: svmLin, svmRBF, knn
accuracyByFolds=new double[clsTab.length][folds];
accuracyByFoldsPS=new double[clsTab.length][folds];
accuracyByFoldsFuriaThr=new double[clsTab.length][folds];
accByFoldsLF=new double[clsTab.length][folds];
accByFoldsCP=new double[clsTab.length][folds];
accByFoldsRE=new double[clsTab.length][folds];
accByFoldsNum=new double[clsTab.length][folds];
numberOfFeatByFolds=new double[6][folds];
numOfFeatByFoldsLF=new double[folds];
numFeatByFoldsCP=new double[folds];
numFeatByFoldsRE=new double[folds];
numFeatByFoldsNum=new double[folds];
numFeatByFoldsFuriaThr=new double[2][folds];
exlpTime=new long[folds]; allFCTime=new long[folds]; allFCTimeLF=new long[folds]; cartesianFCTime=new long[folds];relationalFCTime=new long[folds];
numericalFCTime=new long[folds]; furiaThrTime=new long[folds];
paramSearchTime=new long[clsTab.length][folds]; paramSLearnT=new long[clsTab.length][folds];
learnAllFCTime=new long[clsTab.length][folds]; learnAllFCTimeLF=new long[clsTab.length][folds];
learnAllFCTimeCP=new long[clsTab.length][folds];
learnAllFCTimeRE=new long[clsTab.length][folds];
learnAllFCTimeNum=new long[clsTab.length][folds];
learnFuriaThrTime=new long[clsTab.length][folds];
featByFoldsPS=new double[6][folds][clsTab.length];
numberOfTreeByFolds=new double[4][folds];
numOfTreeByFoldsLF=new double[4][folds];
numOfTreeByFoldsCP=new double[4][folds];
numOfTreeByFoldsRE=new double[4][folds];
numOfTreeByFoldsNum=new double[4][folds];
numTreeByFoldsFuriaThr=new double[4][folds];
numberOfTreeByFoldsPS=new double[4][folds];
numCartFeatInTreeFS=new double[2][folds];
numberOfUnImpFeatByFolds=new double[8][folds]; //OR, EQU, XOR, IMPL, AND, LESSTHAN, DIFF, CARTESIAN
maxGroupOfConstructs=new double[folds];
numOfGroupsOfFeatConstr=new double[folds];
numOfExplainedInst=new double[folds];
avgTermsPerGroup=new double[folds];
avgTermsPerFold=new int[folds];
numOfRulesByFolds=new double[folds]; numOfTermsByFoldsF=new double[folds];
numOfRulesByFoldsLF=new double[folds]; numOfTermsByFoldsLF=new double[folds]; numOfRatioByFoldsLF=new double[folds]; //for logical features
numOfRulesByFoldsCP=new double[folds]; numOfTermsByFoldsCP=new double[folds]; numOfRatioByFoldsCP=new double[folds]; //for Cartesian features
numOfRulesByFoldsRE=new double[folds]; numOfTermsByFoldsRE=new double[folds]; numOfRatioByFoldsRE=new double[folds]; //for relational features
numOfRulesByFoldsNum=new double[folds]; numOfTermsByFoldsNum=new double[folds]; numOfRatioByFoldsNum=new double[folds]; //for numerical features
complexityOfFuriaPS=new double[3][folds];
numOfFuriaThrInTreeByFolds=new double[4][folds];
numOfFuriaThrInTreeByFoldsF=new double[4][folds];
numOfFuriaThrInTreeByFoldsP=new double[4][folds];
learnAllTime=new long[clsTab.length][folds];
accOrigModelByFolds=new double[clsTab.length][folds];
accExplAlgInt=new double[folds]; accExplAlgTest=new double[folds];
complexityOfFuria=new double[3][folds];
treeSize=new double[folds]; numOfLeaves=new double[folds]; sumOfTerms=new double[folds]; ratioTermsNodes=new double[folds]; numOfRules=new double[folds];
numOfTerms=new double[folds]; numConstructsPerRule=new double[folds]; numOfRatioByFoldsF=new double[folds];
modelBuildTime=new long[folds];
oobRF=new double[folds];
numOfCartesian=new double[folds];
numOfRelational=new double[folds];
numOfNumerical=new double[folds];
numOfCartFAll=new double[folds];
numLogFeatInTreeFS=new double[2][folds]; //0-number of log features, 1-sum of constructs
numNumFeatInTreeFS=new double[2][folds]; //0-number of numerical features, 1-sum of constructs
numRelFeatInTreeFS=new double[2][folds]; //0-number of rel features, 1-sum of constructs
numOfLogicalInTree=new double[2][folds];
numOfLogInTreeAll=new double[folds];
numOfNumInTreeAll=new double[folds];
numOfRelInTreeAll=new double[folds];
sumOfConstrCart=new double[folds];
sumOfConstrRel=new double[folds];
sumOfConstrNum=new double[folds];
sumOfConstrCartAll=new double[folds];
sumOfConstrLFAll=new double[folds];
sumOfConstrNumAll=new double[folds];
sumOfConstrRelAll=new double[folds];
if(!visualisation){
System.out.println("number of instances: "+data.numInstances());
logFile.println("number of instances: "+data.numInstances());
System.out.println("number of attributes: "+(data.numAttributes()-1));
logFile.println("number of attributes: "+(data.numAttributes()-1));
System.out.println("number of classes: "+(data.numClasses()));
logFile.println("number of classes: "+(data.numClasses()));
}
data.setClassIndex(data.numAttributes()-1);
//HEURISTICS OF CLASS SELECTION FOR EXPLANATION
//array of frequencies for each class - how many instances occur in a particular class
classDistr=Arrays.stream(data.attributeStats(data.classIndex()).nominalCounts).asDoubleStream().toArray(); //converted to double[] because log2Multinomial expects a double array as parameter
for(int i=0;i<minIndexClassifiers(classDistr).length;i++){
if(minIndexClassifiers(classDistr)[i].v>=Math.ceil(data.numInstances()*instThr/100.00)){ //we choose class to explain - class has to have at least instThr pct of whole instances
classToExplain=minIndexClassifiers(classDistr)[i].i;
break;
}
}
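//worked example (counts assumed): with class counts {70,20,10}, 100 instances and instThr=10,
//a class needs at least ceil(100*10/100.00)=10 instances, so the smallest qualifying class (10) is explained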
if(!visualisation){
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
}
//SPLIT TO TRAIN AND TEST - N fold CV or split in ratio, depends on what number of folds has been chosen
ReplaceMissingValues rwm;
Random rand = new Random(1);
Instances randData=new Instances(data);
Instances test;
randData.randomize(rand);
if(isClassification && folds>1)
randData.stratify(folds); //for imbalanced datasets before splitting the dataset into train and test set - same as WEKA GUI
/******************* VISUALISATION *********************************/
if(visualisation){
System.out.println("Drawing ...");
visualizeModelInstances(visualModel, data, true, RESOLUTION, numOfImpt, visFrom, visTo); //visualise explanations for instances visFrom to visTo
System.out.println("Drawing is finished!");
continue loopExplanationVisualisation;
}
/************************************* FOLDS *********************************************************************/
for (int f = 0; f < folds; f++){
unInfFeatures.clear(); //clear set for each fold
int minN=minNoise;
if(!jakulin){
attrImpListMDL.println("\t\t\t\t\t\t\t\t--------------");
attrImpListMDL.printf("\t\t\t\t\t\t\t\t\tFold %2d\n",(f+1));
attrImpListMDL.println("\t\t\t\t\t\t\t\t--------------");
attrImpListReliefF.println("\t\t\t\t\t\t\t\t--------------");
attrImpListReliefF.printf("\t\t\t\t\t\t\t\t\tFold %2d\n",(f+1));
attrImpListReliefF.println("\t\t\t\t\t\t\t\t--------------");
}
if(folds==1){
StratifiedRemoveFolds fold;
fold = new StratifiedRemoveFolds();
fold.setInputFormat(randData);
fold.setSeed(1);
fold.setNumFolds(splitTrain);
fold.setFold(splitTrain);
fold.setInvertSelection(true); //because we invert selection we take all folds except the "split" one
data = Filter.useFilter(randData,fold);
fold = new StratifiedRemoveFolds();
fold.setInputFormat(randData);
fold.setSeed(1);
fold.setNumFolds(splitTrain);
fold.setFold(splitTrain);
fold.setInvertSelection(false);
test = Filter.useFilter(randData,fold);
}
else{
data = randData.trainCV(folds, f,rand); //same as WEKA GUI
test= randData.testCV(folds, f);
}
rwm=new ReplaceMissingValues();
rwm.setInputFormat(data);
data=Filter.useFilter(data, rwm);
test=Filter.useFilter(test, rwm); //insert mean values from train dataset
discIntervals.println("\t\t\t\t\t\t\t\t--------------");
discIntervals.printf("\t\t\t\t\t\t\t\t\tFold %2d\n",(f+1));
discIntervals.println("\t\t\t\t\t\t\t\t--------------");
namesOfDiscAttr(data); //save discretization intervals
ModelAndAcc ma;
Classifier model;
for (int m=0;m<clsTab.length;m++){
model=clsTab[m];
t1=new Timer();
t1.start();
ma=evaluateModel(data, test, model);
t1.stop();
accOrigModelByFolds[m][f]=ma.getAcc();
learnAllTime[m][f]=t1.diff();
model=ma.getClassifier();
if(excludeUppers(model.getClass().getSimpleName()).equals("J48")){
j48=new J48();
j48=(J48)(model);
treeSize[f]=j48.measureTreeSize();
numOfLeaves[f]=j48.measureNumLeaves();
sumOfTerms[f]=sumOfTermsInConstrInTree(data,data.numAttributes()-1, j48);
ratioTermsNodes[f]=(treeSize[f]-numOfLeaves[f])==0 ? 0 : sumOfTerms[f]/(treeSize[f]-numOfLeaves[f]);
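//treeSize-numOfLeaves is the number of internal (decision) nodes, so the ratio is the
//average number of constituent terms per split; the guard avoids division by zero for a single-leaf tree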
}
if(excludeUppers(model.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=new FURIA();
fu=(FURIA)(model);
numOfRules[f]= fu.getRuleset().size();
numOfTerms[f]=sumOfTermsInConstrInRule(fu.getRuleset(),data);
numConstructsPerRule[f]=numOfRules[f]==0 ? 0 : (numOfTerms[f]/numOfRules[f]);
}
}
if(!exhaustive){
double allExplanations[][]=null;
double allWeights[][]=null;
float allExplanationsSHAP[][];
float allWeightsSHAP[][]=null;
List<String>impInter=null;
Set<String> attrGroups= new LinkedHashSet<>(); //we want to keep the insertion order, we don't want duplicates - use of LinkedHashSet
int numClasses=1; //1 - just one iteration, we explain minority class, otherwise numClasses=classDistr.length; we explain all classes
if(explAllClasses)
numClasses=classDistr.length;
/*SHAP*/
if(treeSHAP){
/*XGBOOST*/
DMatrix trainMat = wekaInstancesToDMatrix(data);
DMatrix testMat =wekaInstancesToDMatrix(test);
float tmpContrib[][];
int numOfClasses=data.numClasses();
HashMap<String, Object> params = new HashMap<>();
params.put("eta", eta); //"eta - learning_rate ("shrinkage" parameter)": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] It is advised to have small values of eta in the range of 0.1 to 0.3 because of overfitting
params.put("max_depth", maxDepth);
params.put("silent", 1); //print
params.put("nthread", processors);
params.put("gamma", gamma); //"gamma-min_split_loss": [ 0.0, 0.1, 0.2 , 0.3, 0.4 ], gamma works by regularising using "across trees" information
if(numOfClasses==2){ //for binary examples
params.put("objective", "binary:logistic"); //binary:logistic – logistic regression for binary classification, returns predicted probability (not class)
params.put("eval_metric", "error");
}
else{ //multi class problems
params.put("objective", "multi:softmax"); //multi:softprob multi:softmax
params.put("eval_metric", "merror");
params.put("num_class", (numOfClasses));
}
Map<String, DMatrix> watches = new HashMap<String, DMatrix>() {{
put("train", trainMat);
put("test", testMat);
}
};
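//note: the double-brace initialiser above creates an anonymous HashMap subclass; the watch
//list makes XGBoost report the train/test evaluation metric after every boosting round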
//building model
t1=new Timer();
t1.start();
Booster booster = XGBoost.train(trainMat, params, numOfRounds, watches, null, null);
t1.stop();
modelBuildTime[f]=t1.diff();
String evalNameTest[]={"test"};
String evalNameTrain[]={"train"};
DMatrix [] testMatArr={testMat};
DMatrix [] trainMatArr={trainMat};
String accTrain=booster.evalSet(trainMatArr, evalNameTrain,0);
String accTest=booster.evalSet(testMatArr, evalNameTest,0);
testMatArr=null;
trainMatArr=null;
accExplAlgInt[f]=(1-Double.parseDouble(accTrain.split(":")[1]))*100; //internal evaluation of the model
accExplAlgTest[f]=(1-Double.parseDouble(accTest.split(":")[1]))*100; //evaluation on the test dataset
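//booster.evalSet returns a string such as "[0]\ttrain-error:0.1234" (format assumed from
//XGBoost4J logs), so splitting on ':' and parsing the tail turns the error rate into accuracy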
//explaining model
t1=new Timer();
t1.start();
if(f==0 && explAllClasses){
for(int d=0;d<numClasses;d++){
System.out.println("Alg. for searching concepts: TreeSHAP (XGBOOST) parameters: numOfRounds->"+numOfRounds+" maxDepth->"+maxDepth+" eta->"+eta+" gamma->"+gamma);
logFile.println("Alg. for searching concepts: TreeSHAP (XGBOOST) parameters: numOfRounds->"+numOfRounds+" maxDepth->"+maxDepth+" eta->"+eta+" gamma->"+gamma);
System.out.println("Explaining class: "+data.classAttribute().value(d)+" explaining whole dataset: "+(explAllData?"YES":"NO"));
logFile.println("Explaining class: "+data.classAttribute().value(d)+" explaining whole dataset: "+(explAllData?"YES":"NO"));
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
}
}
for(int c=0;c<numClasses;c++){//numClasses=1; (we explain minority class), numClasses=classDistr.length; (we explain all classes)
if(explAllClasses)
classToExplain=c;
Instances explainData=new Instances(data);
RemoveWithValues filter = new RemoveWithValues();
filter.setAttributeIndex("last") ; //class
filter.setNominalIndices((classToExplain+1)+""); //what we remove ... +1 because indexes go from 0, we need indexes from 1 for method setNominalIndices
filter.setInvertSelection(true); //if we invert selection then we keep selected data ...
filter.setInputFormat(explainData);
explainData = Filter.useFilter(explainData, filter);
if(f==0 && !explAllClasses){ //print this info only once
System.out.println("Alg. for searching concepts: TreeSHAP (XGBOOST) parameters: numOfRounds->"+numOfRounds+" maxDepth->"+maxDepth+" eta->"+eta+" gamma->"+gamma);
logFile.println("Alg. for searching concepts: TreeSHAP (XGBOOST) parameters: numOfRounds->"+numOfRounds+" maxDepth->"+maxDepth+" eta->"+eta+" gamma->"+gamma);
System.out.println("Explaining class: "+data.classAttribute().value(classToExplain)+" explaining whole dataset: "+(explAllData?"YES":"NO"));
logFile.println("Explaining class: "+data.classAttribute().value(classToExplain)+" explaining whole dataset: "+(explAllData?"YES":"NO"));
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
}
numInst=data.attributeStats(data.classIndex()).nominalCounts[classToExplain]; //number of instances in explained class
if(numInst==0)
continue; //class has no instances e.g., class -3 in dataset autos
//we are explaining just instances from the explained class and not also from other classes
//if we have more than maxToExplain (e.g. 500) instances we take only maxToExplain instances
if(!explAllData){
if(numInst>maxToExplain){
System.out.println("We take only "+maxToExplain+" instances out of "+numInst+" from the class "+data.classAttribute().value(classToExplain)+".");
impGroups.println("We take only "+maxToExplain+" instances out of "+numInst+" from the class "+data.classAttribute().value(classToExplain)+".");
explainData.randomize(rand);
explainData = new Instances(explainData, 0, maxToExplain);
numInst=explainData.attributeStats(explainData.classIndex()).nominalCounts[classToExplain]; //for correct print on output
}
}
DMatrix explainMat = wekaInstancesToDMatrix(explainData);
if(explAllData)
tmpContrib=booster.predictContrib(trainMat, 0); //Tree SHAP ... for each feature, and last for bias matrix of size (nsample, nfeats + 1) ... feature contributions (SHAP xgboost predict)
else
tmpContrib=booster.predictContrib(explainMat, 0); //tree limit - Limit number of trees in the prediction; defaults to 0 (use all trees).
explainMat.dispose();
testMat.dispose();
trainMat.dispose();
t1.stop();
//Note that shap_values for the two classes are additive inverses for a binary classification problem!!!
//The variant of SHAP which deals with trees (TreeSHAP) calculates exact Shapley values and does it fast.
if(numOfClasses==2){
allExplanationsSHAP=removeCol(tmpContrib, tmpContrib[0].length-1); //we remove last column, because we do not need column with bias
}
else{
int idxQArr[]=new int[data.numAttributes()-1];
if(classToExplain==0)
for(int i=0;i<idxQArr.length;i++)
idxQArr[i]=i;
else{
int start=classToExplain*data.numAttributes(); //each class block spans numAttributes()-1 feature columns plus one bias column
for(int j=0;j<idxQArr.length;j++) //take exactly idxQArr.length consecutive columns (the original upper bound missed the last column for classToExplain>=2)
idxQArr[j]=start+j;
}
allExplanationsSHAP=someColumns(tmpContrib, idxQArr); //we take just columns of attributes from the class that we explain
}
if(numInst<minExplInst)
minN=minMinNoise;
double noiseThr=(numInst*NOISE)/100.0; //we take number of noise threshold from the number of explained instances
int usedNoise=Math.max((int)Math.ceil(noiseThr),minN); //makes sense only if NOISE=0
System.out.println("We remove max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(noiseThr)+ ", minNoise="+minN+" we remove groups of size "+usedNoise+". Tree SHAP num of expl. inst. "+(explAllData ? data.numInstances() : numInst)+" (fold "+(f+1)+"). "+"Explaining class: "+data.classAttribute().value(classToExplain)+".");
impGroups.println("We remove max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(noiseThr)+ ", minNoise="+minN+" we remove groups of size "+usedNoise+". Tree SHAP num of expl. inst. "+(explAllData ? data.numInstances() : numInst)+" (fold "+(f+1)+"). "+"Explaining class: "+data.classAttribute().value(classToExplain)+").");
impGroups.println("Lower threshold thrL: "+thrL+" upper threshold thrU: "+thrU+" with step: "+step);
impGroups.println("\t\t\t\t\t\t\t\t--------------");
impGroups.printf("\t\t\t\t\t\t\t\t\tFold %2d\n",(f+1));
impGroups.println("\t\t\t\t\t\t\t\t--------------");
if(groupsByThrStat && !visualisation && !justExplain && !exhaustive && !jakulin && f==0){
DecimalFormat df = new DecimalFormat("0.0");
for(double q=thrL;q<=thrU;q=q+step)
groupsStat.write(df.format(q)+";");
groupsStat.println();
}
for(double q=thrL;q<=thrU;q=q+step){
impGroups.println("--------------");
impGroups.printf("Threshold: %2.2f\n",round(q,1));
impGroups.println("--------------");
allWeightsSHAP=setWeights(data,allExplanationsSHAP,round(q,1));
impInter=(getMostFqSubsets(allWeightsSHAP,data,usedNoise));
attrGroups.addAll(impInter);
if(groupsByThrStat && !visualisation && !justExplain && !exhaustive && !jakulin)
groupsStat.write(impInter.size()+";");
}
if(groupsByThrStat && !visualisation && !justExplain && !exhaustive && !jakulin)
groupsStat.println();
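//pipeline sketch (semantics inferred from the helper names): setWeights binarises each
//instance explanation at threshold q, getMostFqSubsets keeps attribute subsets that
//co-occur in at least usedNoise explanations, and the union over all thresholds forms
//the candidate concepts collected in attrGroups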
}//loop explain (all) class(es)
}
else{
//System.out.println("Building model ...");
t1=new Timer();
t1.start();
predictionModel.buildClassifier(data);
t1.stop();
//System.out.println("Prediction model created.");
modelBuildTime[f]=t1.diff();
if(excludeUppers(predictionModel.getClass().getSimpleName()).equals("RF")){ //OOB
rf=(RandomForest)predictionModel;
oobRF[f]=(1-rf.measureOutOfBagError())*100;
}
//evaluate prediction model on test data
Evaluation eval = new Evaluation(data);
eval.evaluateModel(predictionModel, test);
accExplAlgTest[f]=(eval.correct())/(eval.incorrect()+eval.correct())*100.00; //same as 1-eval.errorRate())*100.0
/*IME*/
if(f==0 && explAllClasses){
for(int d=0;d<numClasses;d++){
System.out.println("IME (explanation), "+method.name()+", "+(method.name().equals("adaptiveSampling") ? "min samples: "+minS+", sum of samples: "+sumOfSmp : method.name().equals("diffSampling")?"min samples: "+minS:"N_SAMPLES: "+N_SAMPLES)+" - alg. for searching concepts: "+predictionModel.getClass().getSimpleName());
logFile.println("IME (explanation), "+method.name()+", "+(method.name().equals("adaptiveSampling") ? "min samples: "+minS+", sum of samples: "+sumOfSmp : method.name().equals("diffSampling")?"min samples: "+minS:"N_SAMPLES: "+N_SAMPLES)+" - alg. for searching concepts: "+predictionModel.getClass().getSimpleName());
System.out.println("Explaining class: "+data.classAttribute().value(d)+", explaining whole dataset: "+(explAllData?"YES":"NO"));
logFile.println("Explaining class: "+data.classAttribute().value(d)+", explaining all dataset: "+(explAllData?"YES":"NO"));
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
switch(method){
case aproxErrSampling:
System.out.println("Sampling based on mi=(<1-alpha, e>), pctErr = "+pctErr+" error = "+error+".");
logFile.println("Sampling based on mi=(<1-alpha, e>), pctErr = "+pctErr+" error = "+error+".");
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
break;
}
}
}
for(int i=0;i<numClasses;i++){ //numClasses=1; (we explain minority class), numClasses=classDistr.length; (we explain all classes)
if(explAllClasses)
classToExplain=i;
if(f==0 && !explAllClasses){ //print this info only once
System.out.println("IME (explanation), "+method.name()+", "+(method.name().equals("adaptiveSampling") ? "min samples: "+minS+", sum of samples: "+sumOfSmp : method.name().equals("diffSampling")?"min samples: "+minS:"N_SAMPLES: "+N_SAMPLES)+" - alg. for searching concepts: "+predictionModel.getClass().getSimpleName());
logFile.println("IME (explanation), "+method.name()+", "+(method.name().equals("adaptiveSampling") ? "min samples: "+minS+", sum of samples: "+sumOfSmp : method.name().equals("diffSampling")?"min samples: "+minS:"N_SAMPLES: "+N_SAMPLES)+" - alg. for searching concepts: "+predictionModel.getClass().getSimpleName());
System.out.println("Explaining class: "+data.classAttribute().value(classToExplain)+", explaining whole dataset: "+(explAllData?"YES":"NO"));
logFile.println("Explaining class: "+data.classAttribute().value(classToExplain)+", explaining all dataset: "+(explAllData?"YES":"NO"));
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
switch(method){
case aproxErrSampling:
System.out.println("Sampling based on mi=(<1-alpha, e>), pctErr = "+pctErr+" error = "+error+".");
logFile.println("Sampling based on mi=(<1-alpha, e>), pctErr = "+pctErr+" error = "+error+".");
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
break;
}
}
numInst=data.attributeStats(data.classIndex()).nominalCounts[classToExplain]; //number of instances in explained class
if(numInst==0)
continue; //class has no instances e.g., class -3 in dataset autos
Instances explainData=new Instances(data);
RemoveWithValues filter = new RemoveWithValues();
filter.setAttributeIndex("last") ; //class
filter.setNominalIndices((classToExplain+1)+""); //what we remove ... +1 because indexes go from 0, we need indexes from 1 for method setNominalIndices
filter.setInvertSelection(true); //if we invert selection then we keep selected data ...
filter.setInputFormat(explainData);
explainData = Filter.useFilter(explainData, filter);
//we are explaining just instances from the explained class and not also instances from other classes
//if we have more than maxToExplain (e.g. 500) instances we take only maxToExplain instances
if(!explAllData){
if(numInst>maxToExplain){
System.out.println("We take only "+maxToExplain+" instances out of "+numInst+" from the class "+data.classAttribute().value(classToExplain)+".");
impGroups.println("We take only "+maxToExplain+" instances out of "+numInst+" from the class "+data.classAttribute().value(classToExplain)+".");
explainData.randomize(rand);
explainData = new Instances(explainData, 0, maxToExplain);
numInst=explainData.attributeStats(explainData.classIndex()).nominalCounts[classToExplain]; //for correct print on output
}
t1=new Timer();
t1.start();
switch (method){
case equalSampling:
allExplanations=IME.explainAllDatasetES(data,explainData,predictionModel,N_SAMPLES, classToExplain); //equal sampling
break;
case adaptiveSamplingSS:
allExplanations=IME.explainAllDatasetAS(data,explainData,predictionModel, minS, sumOfSmp, classToExplain); //we need sumOfSmp (sum of samples) for additive sampling
break;
case adaptiveSamplingAE:
allExplanations=IME.explainAllDatasetAS(data,explainData,predictionModel, minS, classToExplain, error, pctErr);
break;
case aproxErrSampling:
allExplanations=IME.explainAllDatasetAES(predictionModel, data, explainData,true, classToExplain, minS, error, pctErr);
break;
}
t1.stop();
}
else{
t1=new Timer();
t1.start();
switch (method){
case equalSampling:
allExplanations=IME.explainAllDatasetES(data,data,predictionModel,N_SAMPLES, classToExplain); //equal sampling
break;
case adaptiveSamplingSS:
allExplanations=IME.explainAllDatasetAS(data,data,predictionModel, minS, sumOfSmp, classToExplain);//we need sumOfSmp (sum of samples) for additive sampling
break;
case adaptiveSamplingAE:
allExplanations=IME.explainAllDatasetAS(data,data,predictionModel, minS, classToExplain, error, pctErr);
break;
case aproxErrSampling:
allExplanations=IME.explainAllDatasetAES(predictionModel, data, data, true, classToExplain, minS, error,pctErr);
break;
}
t1.stop();
}
if(numInst<minExplInst)
minN=minMinNoise;
double noiseThr=(numInst*NOISE)/100.0;//we take number of noise threshold from the number of explained instances
int usedNoise=Math.max((int)Math.ceil(noiseThr),minN); //makes sense only if NOISE=0 or num of explained instances is very low
System.out.println("We remove max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(noiseThr)+ ", minNoise="+minN+" we remove groups of size "+usedNoise+". Number of instances from class ("+explainData.classAttribute().value(classToExplain)+") is "+numInst+" (fold "+(f+1)+").");
impGroups.println("We remove max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(noiseThr)+ ", minNoise="+minN+" we remove groups of size "+usedNoise+". Number of instances from class ("+explainData.classAttribute().value(classToExplain)+") is "+numInst+" (fold "+(f+1)+").");
impGroups.println("Lower threshold thrL: "+thrL+" upper threshold thrU: "+thrU+" with step: "+step);
impGroups.println("\t\t\t\t\t\t\t\t--------------");
impGroups.printf("\t\t\t\t\t\t\t\t\tFold %2d\n",(f+1));
impGroups.println("\t\t\t\t\t\t\t\t--------------");
for(double q=thrL;q<=thrU;q=q+step){
impGroups.println("--------------");
impGroups.printf("Threshold: %2.2f\n",round(q,1));
impGroups.println("--------------");
allWeights=setWeights(data,allExplanations,round(q,1));
impInter=(getMostFqSubsets(allWeights,data,usedNoise));
attrGroups.addAll(impInter);
}
} //loop explain (all) class(es)
} //condition SHAP or IME
exlpTime[f]=t1.diff();
numOfExplainedInst[f]=numInst;
listOfConcepts = new ArrayList<>(attrGroups);
if(listOfConcepts.size()==0){ //if we didn't find any concepts in this fold, we reuse the results obtained on the original dataset (without constructive induction)
//take ACC and learning time from orig. dataset
for (int i=0;i<clsTab.length;i++){
//logical
accByFoldsLF[i][f]=accOrigModelByFolds[i][f];
learnAllFCTimeLF[i][f]=learnAllTime[i][f];
//numerical
accByFoldsNum[i][f]=accOrigModelByFolds[i][f];
learnAllFCTimeNum[i][f]=learnAllTime[i][f];
//Cartesian product
accByFoldsCP[i][f]=accOrigModelByFolds[i][f];
learnAllFCTimeCP[i][f]=learnAllTime[i][f];
//relational
accByFoldsRE[i][f]=accOrigModelByFolds[i][f];
learnAllFCTimeRE[i][f]=learnAllTime[i][f];
//FURIA and thr
accuracyByFoldsFuriaThr[i][f]=accOrigModelByFolds[i][f];
learnFuriaThrTime[i][f]=learnAllTime[i][f];
//All features
accuracyByFolds[i][f]=accOrigModelByFolds[i][f];
learnAllFCTime[i][f]=learnAllTime[i][f];
//FS on validation dataset
accuracyByFoldsPS[i][f]=accOrigModelByFolds[i][f];
paramSLearnT[i][f]=learnAllTime[i][f];
}
//take treeSize from orig. dataset
//logical
numOfTreeByFoldsLF[0][f]=treeSize[f];
//numerical
numOfTreeByFoldsNum[0][f]=treeSize[f];
//Cartesian product
numOfTreeByFoldsCP[0][f]=treeSize[f];
//relational
numOfTreeByFoldsRE[0][f]=treeSize[f];
//FURIA and thr
numTreeByFoldsFuriaThr[0][f]=treeSize[f];
//All features
numberOfTreeByFolds[0][f]=treeSize[f];
//FS on validation dataset
numberOfTreeByFoldsPS[0][f]=treeSize[f];
//take numOfLeaves from orig. dataset
//logical
numOfTreeByFoldsLF[1][f]=numOfLeaves[f];
//numerical
numOfTreeByFoldsNum[1][f]=numOfLeaves[f];
//Cartesian product
numOfTreeByFoldsCP[1][f]=numOfLeaves[f];
//relational
numOfTreeByFoldsRE[1][f]=numOfLeaves[f];
//FURIA and thr
numTreeByFoldsFuriaThr[1][f]=numOfLeaves[f]; //numOfLeaves
//All features
numberOfTreeByFolds[1][f]=numOfLeaves[f]; //numOfLeaves
//FS on validation dataset
numberOfTreeByFoldsPS[1][f]=numOfLeaves[f];
//take sumOfTerms from orig. dataset
//logical
numOfTreeByFoldsLF[2][f]=sumOfTerms[f];
//numerical
numOfTreeByFoldsNum[2][f]=sumOfTerms[f];
//Cartesian product
numOfTreeByFoldsCP[2][f]=sumOfTerms[f];
//relational
numOfTreeByFoldsRE[2][f]=sumOfTerms[f];
//FURIA and thr
numTreeByFoldsFuriaThr[2][f]=sumOfTerms[f];
//All features
numberOfTreeByFolds[2][f]=sumOfTerms[f];
//FS on validation dataset
numberOfTreeByFoldsPS[2][f]=sumOfTerms[f];
//ratio between sumOfTerms and nodes in the original dataset is 1 - one node equals one attribute; it can also be 0 (a tree with just one leaf)
numOfTreeByFoldsLF[3][f]=ratioTermsNodes[f]; //logical
numOfTreeByFoldsNum[3][f]=ratioTermsNodes[f]; //numerical
numOfTreeByFoldsCP[3][f]=ratioTermsNodes[f]; //Cartesian product
numOfTreeByFoldsRE[3][f]=ratioTermsNodes[f]; //relational feat
numTreeByFoldsFuriaThr[3][f]=ratioTermsNodes[f]; //FURIA and thr
numberOfTreeByFolds[3][f]=ratioTermsNodes[f]; //All features
numberOfTreeByFoldsPS[3][f]=ratioTermsNodes[f]; //FS
//take numOfRules and numOfTerms from orig. dataset
//logical
numOfRulesByFoldsLF[f]=numOfRules[f];
numOfTermsByFoldsLF[f]=numOfTerms[f];
numOfRatioByFoldsLF[f]=numConstructsPerRule[f];
//numerical
numOfRulesByFoldsNum[f]=numOfRules[f];
numOfTermsByFoldsNum[f]=numOfTerms[f];
numOfRatioByFoldsNum[f]=numConstructsPerRule[f];
//Cartesian product
numOfRulesByFoldsCP[f]=numOfRules[f];
numOfTermsByFoldsCP[f]=numOfTerms[f];
numOfRatioByFoldsCP[f]=numConstructsPerRule[f];
//relational
numOfRulesByFoldsRE[f]=numOfRules[f];
numOfTermsByFoldsRE[f]=numOfTerms[f];
numOfRatioByFoldsRE[f]=numConstructsPerRule[f];
//FURIA and thr
complexityOfFuria[0][f]=numOfRules[f];
complexityOfFuria[1][f]=numOfTerms[f];
complexityOfFuria[2][f]=numConstructsPerRule[f];
//All features
numOfRulesByFolds[f]=numOfRules[f];
numOfTermsByFoldsF[f]=numOfTerms[f];
numOfRatioByFoldsF[f]=numConstructsPerRule[f];
//FS on validation dataset
complexityOfFuriaPS[0][f]=numOfRules[f];
complexityOfFuriaPS[1][f]=numOfTerms[f];
complexityOfFuriaPS[2][f]=numConstructsPerRule[f];
}
}
else{
String idxOfAttr="";
for (int i=0;i<data.numAttributes()-1;i++)
if(i<data.numAttributes()-2)
idxOfAttr+=i+",";
else
idxOfAttr+=i;
System.out.println(idxOfAttr);
listOfConcepts = new ArrayList<>();
listOfConcepts.add(idxOfAttr); //we try all combinations
}
if(listOfConcepts.size()!=0 && !jakulin){
impGroups.println("*********************************************************************************");
impGroups.println("All potential concepts based on thresholds");
impGroups.print("\t"); printFqAttrOneRow(listOfConcepts,data);
impGroups.println("\n*********************************************************************************");
}
int sumMax[]=printMaxConstructLength(listOfConcepts); //sumMax[0] number of all constructs (attributes) in all groups
if(listOfConcepts.size()!=0)
avgTermsPerGroup[f]=sumMax[0]/(double)listOfConcepts.size(); //average number of constructs per group
avgTermsPerFold[f]=sumMax[0]; //number of all attributes in groups in one fold
maxGroupOfConstructs[f]=sumMax[1]; //length of the longest construct
t1=new Timer();
t1.start();
numOfGroupsOfFeatConstr[f]=listOfConcepts.size();
if(numOfGroupsOfFeatConstr[f]==0){
System.out.println("We didn't find any concepts in fold "+(f+1)+" above threshold max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(numInst*NOISE/100.0)+ ", minNoise="+minN+" we remove groups of size "+Math.max((int)Math.ceil(numInst*NOISE/100.0),minN));
logFile.println("We didn't find any concepts in fold "+(f+1)+" above threshold max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(numInst*NOISE/100.0)+ ", minNoise="+minN+" we remove groups of size "+Math.max((int)Math.ceil(numInst*NOISE/100.0),minN));
continue; //we skip constructive induction if we don't find any concepts
}
Instances trainFold= new Instances(data); //for logical and All features
Instances testFold=new Instances(test); //for logical and All features
Instances trainFoldNum= new Instances(data); //for numerical features (e.g., dataset Credit score)
Instances testFoldNum=new Instances(test); //for numerical features (e.g., dataset Credit score)
Instances trainFoldRE= new Instances(data); //for relational features
Instances testFoldRE=new Instances(test); //for relational features
Instances trainFoldCP= new Instances(data); //for Cartesian product
Instances testFoldCP=new Instances(test); //for Cartesian product
Instances trainFoldFU= new Instances(data); //for FURIA and thr
Instances testFoldFU=new Instances(test); //for FURIA and thr
int numOfOrigAttr=data.numAttributes()-1;
unInfFeatures.clear();
//logical features
int tmp[];
int nC[]; //for counting Cartesian or relational or numerical features in tree
unInfFeatures.clear();
if(exhaustive){
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("dd. MM. yyyy HH:mm:ss");
LocalDateTime now = LocalDateTime.now();
System.out.println("Starting FC (exhaustive search, All features method): "+dtf.format(now)+" fold: "+(f+1));
logFile.println("Starting FC (exhaustive search, All features method): "+dtf.format(now)+" fold: "+(f+1));
}
/**************** INTERACTION INFORMATION BY JAKULIN ******************/
if(jakulin){
boolean allDiscrete=true;
for(int i=0;i<trainFoldCP.numAttributes();i++)
if(trainFoldCP.attribute(i).isNumeric()){ //check if attribute is numeric
allDiscrete=false;
System.out.println("We found continuous attribute!");
break;
}
if(!allDiscrete){
//discretization
weka.filters.supervised.attribute.Discretize filter; //because of same class name in different packages
//setup filter
filter = new weka.filters.supervised.attribute.Discretize();
//Discretization is by Fayyad & Irani's MDL method (the default).
trainFoldCP.setClassIndex(trainFoldCP.numAttributes()-1); //we need class index for Fayyad & Irani's MDL
testFoldCP.setClassIndex(testFoldCP.numAttributes()-1);
filter.setInputFormat(trainFoldCP);
//apply filter
trainFoldCP = Filter.useFilter(trainFoldCP, filter);
testFoldCP = Filter.useFilter(testFoldCP, filter); //we have to apply discretization on test dataset based on info from train dataset
}
List allCombSecOrdv2=allCombOfOrderN(listOfConcepts,2); //create groups for second ordered features
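//e.g. (illustrative): for candidate attributes {0,1,2} allCombOfOrderN(...,2) enumerates
//the pairs (0,1), (0,2) and (1,2), whose interaction information is evaluated below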
//calculates interaction information between all combinations of attributes - in some cases it can be too expensive
if(f==(folds-1)){
logFile.println("---------------------------------------------------------------------------------");
System.out.println("Number of all combinations: "+allCombSecOrdv2.size());
logFile.println("Number of all combinations: "+allCombSecOrdv2.size());
}
t1=new Timer();
t1.start();
List combInfInter=interInfoJakulin(trainFoldCP,allCombSecOrdv2, 4); //we take 4 best interaction combinations, for more info see A. Jakulin. Machine learning based on attribute interactions. PhD thesis, University of Ljubljana, Faculty of Computer and Information Science, 2005.
System.out.println("Important combinations based on intraction information (Jakulin) in DESC order");
impGroups.println("*********************************** Fold "+(f+1)+" - list of combinations in DESC order ***********************************");
printAttrNamesIntInf(trainFoldCP, combInfInter);
unInfFeatures.clear();
if(!allDiscrete){
trainFoldCP=addCartFeat(data, trainFoldCP,combInfInter,false,f,2,false); //Jakulin's method accepts all features
testFoldCP=addCartFeat(test, testFoldCP,combInfInter,false,f,2,false); //Jakulin's method accepts all features
}
else{
trainFoldCP=addCartFeat(trainFoldCP,combInfInter,false,f,2,false); //Jakulin's method accepts all features
testFoldCP=addCartFeat(testFoldCP,combInfInter,false,f,2,false); //Jakulin's method accepts all features
}
t1.stop();
cartesianFCTime[f]=t1.diff();
if(cartesianFCTime[f]>timeLimit){ //10800000ms = 3h
System.out.println("Time exceeds the limit ("+timeLimit +" [ms]) of FC - Jakulin's method for one fold!");
logFile.println("Time exceeds the limit ("+timeLimit +" [ms]) of FC - Jakulin's method for one fold!");
break loopExhaustiveTooLong;
}
tmp= numOfFeat(trainFoldCP, data.numAttributes()-1);
numFeatByFoldsCP[f]=tmp[3]; //Cartesian
for(int c=0;c<clsTab.length;c++){
model=clsTab[c];
t1.start();
ma=evaluateModel(trainFoldCP,testFoldCP,model);
t1.stop();
accByFoldsCP[c][f]=ma.getAcc();
learnAllFCTimeCP[c][f]=t1.diff();
model=ma.getClassifier();
if(excludeUppers(model.getClass().getSimpleName()).equals("J48")){
j48=(J48)(model);
numOfTreeByFoldsCP[0][f]=(int)j48.measureTreeSize(); //treeSize
numOfTreeByFoldsCP[1][f]=(int)j48.measureNumLeaves(); //numOfLeaves
numOfTreeByFoldsCP[2][f]=sumOfTermsInConstrInTree(trainFoldCP, data.numAttributes()-1, j48); //sumOfTerms
numOfTreeByFoldsCP[3][f]=(numOfTreeByFoldsCP[0][f]-numOfTreeByFoldsCP[1][f])==0 ? 0 : numOfTreeByFoldsCP[2][f]/(numOfTreeByFoldsCP[0][f]-numOfTreeByFoldsCP[1][f]); //sum of terms of constr DIV num of nodes
nC=numOfCartFeatInTree(trainFoldCP, data.numAttributes()-1, j48);
numOfCartesian[f]=nC[0]; //number of Cartesian features in tree
sumOfConstrCart[f]=nC[1]; //sum of constructs (Cartesian features) in tree
}
if(excludeUppers(model.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=(FURIA)(model);
numOfRulesByFoldsCP[f]=fu.getRuleset().size(); //System.out.println("All features "+fu.getRuleset().size());
numOfTermsByFoldsCP[f]=sumOfTermsInConstrInRule(fu.getRuleset(),trainFoldCP);//System.out.println("All features "+countTermsOfConstructsFuria(fu.getRuleset(),trainFold));
numOfRatioByFoldsCP[f]=numOfRulesByFoldsCP[f]==0 ? 0 : (numOfTermsByFoldsCP[f]/numOfRulesByFoldsCP[f]);
}
}
if((f+1)==folds){
impGroups.println("---------------------------------------------------------------------------------");
}
}
else{
int N2=2;
List allCombSecOrd=allCombOfOrderN(listOfConcepts,N2); //create all second-order attribute combinations (pairs)
/****************************************************************/
/********************* LOGICAL FEATURES *************************/
t1=new Timer();
t1.start();
if(logFeat){
//depth 2
for(String op : operationLogUse){
trainFold= addLogFeatDepth(trainFold, allCombSecOrd,OperationLog.valueOf(op), false, f, N2);
testFold= addLogFeatDepth(data, testFold, allCombSecOrd,OperationLog.valueOf(op), false, f, N2);
}
unInfFeatures.clear();
//construction depth higher than 2
if(featDepth>2){
for(int i=3;i<=featDepth; i++){
List allCombNthOrd=allCombOfOrderN(listOfConcepts,i);
for(String op : operationLogUse)
if(OperationLog.valueOf(op)==OperationLog.AND || OperationLog.valueOf(op)==OperationLog.OR){
trainFold= addLogFeatDepth(trainFold, allCombNthOrd,OperationLog.valueOf(op), false, f, i);
testFold= addLogFeatDepth(data, testFold, allCombNthOrd,OperationLog.valueOf(op), false, f, i);
}
}
}
unInfFeatures.clear();
t1.stop();
allFCTimeLF[f]=t1.diff(); //time for constructing logical features
if(exhaustive){
System.out.println("FC ended (exhaustive search, All features method) fold: "+(f+1)+" time for FC: "+allFCTimeLF[f]);
if(allFCTimeLF[f]>timeLimit){ //10800000ms = 3h
System.out.println("Time exceeds the limit ("+timeLimit +" [ms]) of FC for one fold!");
logFile.println("Time exceeds the limit ("+timeLimit +" [ms]) of FC for one fold!");
break loopExhaustiveTooLong;
}
}
tmp= numOfFeat(trainFold, data.numAttributes()-1);
numOfFeatByFoldsLF[f]=tmp[0]; //we count just logical features
for(int c=0;c<clsTab.length;c++){
model=clsTab[c];
t1.start();
ma=evaluateModel(trainFold,testFold,model);
t1.stop();
accByFoldsLF[c][f]=ma.getAcc();
learnAllFCTimeLF[c][f]=t1.diff();
model=ma.getClassifier();
if(excludeUppers(model.getClass().getSimpleName()).equals("J48")){
j48=(J48)(model);
numOfTreeByFoldsLF[0][f]=(int)j48.measureTreeSize(); //treeSize
numOfTreeByFoldsLF[1][f]=(int)j48.measureNumLeaves(); //numOfLeaves
numOfTreeByFoldsLF[2][f]=sumOfTermsInConstrInTree(trainFold, data.numAttributes()-1, j48); //sumOfTerms
numOfTreeByFoldsLF[3][f]=(numOfTreeByFoldsLF[0][f]-numOfTreeByFoldsLF[1][f])==0 ? 0 : numOfTreeByFoldsLF[2][f]/(numOfTreeByFoldsLF[0][f]-numOfTreeByFoldsLF[1][f]); //sum of terms of constructs DIV num of nodes
numOfLogicalInTree[0][f]=numOfLogFeatInTree(trainFold, data.numAttributes()-1, j48);
numOfLogicalInTree[1][f]=sumOfLFTermsInConstrInTree(trainFold, data.numAttributes()-1, j48);
}
if(excludeUppers(model.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=(FURIA)(model);
numOfRulesByFoldsLF[f]=fu.getRuleset().size();
numOfTermsByFoldsLF[f]=sumOfTermsInConstrInRule(fu.getRuleset(),trainFold);
numOfRatioByFoldsLF[f]=numOfRulesByFoldsLF[f]==0 ? 0 : (numOfTermsByFoldsLF[f]/numOfRulesByFoldsLF[f]);
}
}
}
/****************************************************************/
/******************** NUMERICAL FEATURES ************************/
//numerical operators: /, -, + ... e.g. for Credit score dataset
if(numerFeat){
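//numerical features combine attribute pairs with the arithmetic operators selected in operationNumUse; rCaller and
//code are the R bridge handles passed through to addNumFeat. By analogy with addRelFeat below, the boolean flag
//presumably removes uninformative features on the train fold (true) and merely skips them on the test fold (false).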
unInfFeatures.clear();
t1.start();
for(String op : operationNumUse){
trainFoldNum=addNumFeat(trainFoldNum, OperationNum.valueOf(op), allCombSecOrd, true, rCaller, code);
testFoldNum=addNumFeat(testFoldNum, OperationNum.valueOf(op), allCombSecOrd, false, rCaller, code);
}
t1.stop();
numericalFCTime[f]=t1.diff();
tmp= numOfFeat(trainFoldNum, data.numAttributes()-1);
numFeatByFoldsNum[f]=tmp[5]; //numerical
for(int c=0;c<clsTab.length;c++){
model=clsTab[c];
t1.start();
ma=evaluateModel(trainFoldNum,testFoldNum,model);
t1.stop();
accByFoldsNum[c][f]=ma.getAcc();
learnAllFCTimeNum[c][f]=t1.diff();
model=ma.getClassifier();
if(excludeUppers(model.getClass().getSimpleName()).equals("J48")){
j48=(J48)(model);
numOfTreeByFoldsNum[0][f]=(int)j48.measureTreeSize(); //treeSize
numOfTreeByFoldsNum[1][f]=(int)j48.measureNumLeaves(); //numOfLeaves
numOfTreeByFoldsNum[2][f]=sumOfTermsInConstrInTree(trainFoldNum, data.numAttributes()-1, j48); //sumOfTerms
numOfTreeByFoldsNum[3][f]=(numOfTreeByFoldsNum[0][f]-numOfTreeByFoldsNum[1][f])==0 ? 0 : numOfTreeByFoldsNum[2][f]/(numOfTreeByFoldsNum[0][f]-numOfTreeByFoldsNum[1][f]); //sum of terms of constructs DIV num of nodes
nC=numOfNumFeatInTree(trainFoldNum, data.numAttributes()-1, j48);
numOfNumerical[f]=nC[0]; //number of numerical features in tree
sumOfConstrNum[f]=nC[1]; //sum of constructs (numerical features) in tree
}
if(excludeUppers(model.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=(FURIA)(model);
numOfRulesByFoldsNum[f]=fu.getRuleset().size();
numOfTermsByFoldsNum[f]=sumOfTermsInConstrInRule(fu.getRuleset(),trainFoldNum);
numOfRatioByFoldsNum[f]=numOfRulesByFoldsNum[f]==0 ? 0 : (numOfTermsByFoldsNum[f]/numOfRulesByFoldsNum[f]);
}
}
}
/****************************************************************/
/********************* RELATIONAL FEATURES **********************/
if(relatFeat){
unInfFeatures.clear();
t1.start();
for(String op : operationRelUse){
trainFoldRE=addRelFeat(trainFoldRE,allCombSecOrd,OperationRel.valueOf(op),true,f); //true ... we count and remove uninformative features
testFoldRE=addRelFeat(testFoldRE,allCombSecOrd,OperationRel.valueOf(op),false,f); //false .. we just skip uninformative features
}
t1.stop();
relationalFCTime[f]=t1.diff();
tmp= numOfFeat(trainFoldRE, data.numAttributes()-1);
numFeatByFoldsRE[f]=tmp[4]; //relational
for(int c=0;c<clsTab.length;c++){
model=clsTab[c];
t1.start();
ma=evaluateModel(trainFoldRE,testFoldRE,model);
t1.stop();
accByFoldsRE[c][f]=ma.getAcc();
learnAllFCTimeRE[c][f]=t1.diff();
model=ma.getClassifier();
if(excludeUppers(model.getClass().getSimpleName()).equals("J48")){
j48=(J48)(model);
numOfTreeByFoldsRE[0][f]=(int)j48.measureTreeSize(); //treeSize
numOfTreeByFoldsRE[1][f]=(int)j48.measureNumLeaves(); //numOfLeaves
numOfTreeByFoldsRE[2][f]=sumOfTermsInConstrInTree(trainFoldRE, data.numAttributes()-1, j48); //sumOfTerms
numOfTreeByFoldsRE[3][f]=(numOfTreeByFoldsRE[0][f]-numOfTreeByFoldsRE[1][f])==0 ? 0 : numOfTreeByFoldsRE[2][f]/(numOfTreeByFoldsRE[0][f]-numOfTreeByFoldsRE[1][f]); //sum of terms of constr DIV num of nodes
nC=numOfRelFeatInTree(trainFoldRE, data.numAttributes()-1, j48);
numOfRelational[f]=nC[0]; //number of relational features in tree
sumOfConstrRel[f]=nC[1]; //sum of constructs (relational features) in tree
}
if(excludeUppers(model.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=(FURIA)(model);
numOfRulesByFoldsRE[f]=fu.getRuleset().size();
numOfTermsByFoldsRE[f]=sumOfTermsInConstrInRule(fu.getRuleset(),trainFoldRE);
numOfRatioByFoldsRE[f]=numOfRulesByFoldsRE[f]==0 ? 0 : (numOfTermsByFoldsRE[f]/numOfRulesByFoldsRE[f]);
}
}
}
/****************************************************************/
/*********************** CARTESIAN PRODUCT **********************/
if(cartFeat){
boolean allDiscrete=true;
for(int i=0;i<trainFoldCP.numAttributes();i++)
if(trainFoldCP.attribute(i).isNumeric()){ //check if attribute is numeric
allDiscrete=false;
System.out.println("We found continuous attribute!");
break;
}
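//Cartesian product features are defined over nominal attributes only (e.g. color {red,blue} x shape {circle,square}
//yields one feature with 4 values), so numeric attributes must be discretized first. The supervised filter below
//learns its cut points on the train fold only; the same cut points are then applied to the test fold, so no
//information leaks from the test data.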
t1.start();
if(!allDiscrete){
//discretization
weka.filters.supervised.attribute.Discretize filter; //fully qualified because the same class name exists in different packages
// setup filter
filter = new weka.filters.supervised.attribute.Discretize();
//Discretization is by Fayyad & Irani's MDL method (the default).
trainFoldCP.setClassIndex(trainFoldCP.numAttributes()-1); //we need class index for Fayyad & Irani's MDL
testFoldCP.setClassIndex(testFoldCP.numAttributes()-1);
//filter.setUseBinNumbers(true); //eg BXofY ... B1of1
filter.setInputFormat(trainFoldCP);
//apply filter
trainFoldCP = Filter.useFilter(trainFoldCP, filter);
testFoldCP = Filter.useFilter(testFoldCP, filter); //we have to apply discretization on test dataset based on info from train dataset
}
unInfFeatures.clear();
if(!allDiscrete){
trainFoldCP=addCartFeat(data, trainFoldCP,allCombSecOrd,false,f,2,true);
testFoldCP=addCartFeat(test, testFoldCP,allCombSecOrd,false,f,2,false); //we don't apply uninformative features
}
else{
trainFoldCP=addCartFeat(trainFoldCP,allCombSecOrd,false,f,2,true);
testFoldCP=addCartFeat(testFoldCP,allCombSecOrd,false,f,2,false); //we don't apply uninformative features
}
t1.stop();
cartesianFCTime[f]=t1.diff();
tmp= numOfFeat(trainFoldCP, data.numAttributes()-1);
numFeatByFoldsCP[f]=tmp[3]; //Cartesian
for(int c=0;c<clsTab.length;c++){
model=clsTab[c];
t1.start();
ma=evaluateModel(trainFoldCP,testFoldCP,model);
t1.stop();
accByFoldsCP[c][f]=ma.getAcc();
learnAllFCTimeCP[c][f]=t1.diff();
model=ma.getClassifier();
if(excludeUppers(model.getClass().getSimpleName()).equals("J48")){
j48=(J48)(model);
numOfTreeByFoldsCP[0][f]=(int)j48.measureTreeSize(); //treeSize
numOfTreeByFoldsCP[1][f]=(int)j48.measureNumLeaves(); //numOfLeaves
numOfTreeByFoldsCP[2][f]=sumOfTermsInConstrInTree(trainFoldCP, data.numAttributes()-1, j48); //sumOfTerms
numOfTreeByFoldsCP[3][f]=(numOfTreeByFoldsCP[0][f]-numOfTreeByFoldsCP[1][f])==0 ? 0 : numOfTreeByFoldsCP[2][f]/(numOfTreeByFoldsCP[0][f]-numOfTreeByFoldsCP[1][f]); //sum of terms of constr DIV num of nodes
nC=numOfCartFeatInTree(trainFoldCP, data.numAttributes()-1, j48);
numOfCartesian[f]=nC[0]; //number of Cartesian features in tree
sumOfConstrCart[f]=nC[1]; //sum of constructs (Cartesian features) in tree
}
if(excludeUppers(model.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=(FURIA)(model);
numOfRulesByFoldsCP[f]=fu.getRuleset().size();
numOfTermsByFoldsCP[f]=sumOfTermsInConstrInRule(fu.getRuleset(),trainFoldCP);
numOfRatioByFoldsCP[f]=numOfRulesByFoldsCP[f]==0 ? 0 : (numOfTermsByFoldsCP[f]/numOfRulesByFoldsCP[f]);
}
}
}
/****************************************************************/
/**************** FURIA AND THRESHOLD FEATURES ******************/
if(decRuleFeat || thrFeat){
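//FURIA is run on the train fold to induce rules; each rule becomes a binary decision-rule feature, and the
//num-of-N (threshold) features count how many of a rule's conditions an instance satisfies. The cf, pci,
//covering and featFromExplClass settings control which rules are turned into features.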
t1.start();
List<String> listOfFeat;
listOfFeat=genFeatFromFuria(trainFoldFU, (ArrayList<String>) listOfConcepts, classToExplain, cf, pci,covering, featFromExplClass);
unInfFeatures.clear();
if(decRuleFeat){
trainFoldFU=addFeatures(trainFoldFU, (ArrayList<String>) listOfFeat, true); //add features from Furia
testFoldFU=addFeatures(testFoldFU, (ArrayList<String>) listOfFeat, false); //add features from Furia
}
unInfFeatures.clear();
//num-of-N features ... we are counting true conditions from rules
if(thrFeat){
trainFoldFU=addFeatNumOfN(trainFoldFU, (ArrayList<String>) listOfFeat, true); //add num-Of-N features for evaluation
testFoldFU=addFeatNumOfN(testFoldFU, (ArrayList<String>) listOfFeat, false); //add num-Of-N features for evaluation
}
t1.stop();
furiaThrTime[f]=t1.diff();
tmp= numOfFeat(trainFoldFU, data.numAttributes()-1);
numFeatByFoldsFuriaThr[0][f]=tmp[1]; //threshold
numFeatByFoldsFuriaThr[1][f]=tmp[2]; //FURIA
for(int c=0;c<clsTab.length;c++){
model=clsTab[c];
t1.start();
ma=evaluateModel(trainFoldFU,testFoldFU,model);
t1.stop();
accuracyByFoldsFuriaThr[c][f]=ma.getAcc();
learnFuriaThrTime[c][f]=t1.diff();
model=ma.getClassifier();
if(excludeUppers(model.getClass().getSimpleName()).equals("J48")){
j48=(J48)(model);
numTreeByFoldsFuriaThr[0][f]=(int)j48.measureTreeSize(); //treeSize
numTreeByFoldsFuriaThr[1][f]=(int)j48.measureNumLeaves(); //numOfLeaves
numTreeByFoldsFuriaThr[2][f]=sumOfTermsInConstrInTree(trainFoldFU, data.numAttributes()-1, j48); //sumOfTerms
numTreeByFoldsFuriaThr[3][f]=(numTreeByFoldsFuriaThr[0][f]-numTreeByFoldsFuriaThr[1][f])==0 ? 0 : numTreeByFoldsFuriaThr[2][f]/(numTreeByFoldsFuriaThr[0][f]-numTreeByFoldsFuriaThr[1][f]);
int furiaThrC[]=numOfDrThrFeatInTree(trainFoldFU, data.numAttributes()-1, j48);
numOfFuriaThrInTreeByFoldsF[0][f]=furiaThrC[0];
numOfFuriaThrInTreeByFoldsF[1][f]=furiaThrC[1];
numOfFuriaThrInTreeByFoldsF[2][f]=furiaThrC[2];
numOfFuriaThrInTreeByFoldsF[3][f]=furiaThrC[3];
}
if(excludeUppers(model.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=(FURIA)(model);
complexityOfFuria[0][f]=fu.getRuleset().size();
complexityOfFuria[1][f]=sumOfTermsInConstrInRule(fu.getRuleset(),trainFoldFU);
complexityOfFuria[2][f]=complexityOfFuria[0][f]==0 ? 0 : (complexityOfFuria[1][f]/complexityOfFuria[0][f]);
}
}
}
/****************************************************************/
/************************ ALL FEATURES **************************/
/*Merge all types of features that are included in construction*/
/*get all constructed Logical features, without class; if logical features are not included then we take original attributes without class attribute*/
Remove remove= new Remove();
remove.setAttributeIndices("last");
remove.setInputFormat(trainFold);
trainFold = Filter.useFilter(trainFold, remove);
remove.setAttributeIndices("last");
remove.setInputFormat(testFold);
testFold = Filter.useFilter(testFold, remove);
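//Each per-type dataset still holds the original attributes followed by its constructed features and the class
//attribute (last). Remove with setInvertSelection(true) keeps only the constructed columns (indices
//numOfOrigAttr+1 .. numAttributes()-1), and Instances.mergeInstances concatenates these feature blocks
//column-wise onto trainFold/testFold.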
/*get all constructed numerical features, without class*/
if(numerFeat){
if(!(numOfOrigAttr==trainFoldNum.numAttributes()-1)){ //if we don't get any feature from numerical then we skip merge with numerical feat
remove.setAttributeIndices((numOfOrigAttr+1)+"-"+(trainFoldNum.numAttributes()-1));
remove.setInvertSelection(true);
remove.setInputFormat(trainFoldNum);
trainFoldNum = Filter.useFilter(trainFoldNum, remove);
remove.setAttributeIndices((numOfOrigAttr+1)+"-"+(testFoldNum.numAttributes()-1));
remove.setInvertSelection(true);
remove.setInputFormat(testFoldNum);
testFoldNum = Filter.useFilter(testFoldNum, remove);
/*merge before constructed features and numerical features*/
trainFold=Instances.mergeInstances(trainFold,trainFoldNum);
testFold=Instances.mergeInstances(testFold,testFoldNum);
}
}
/*get all constructed relational features, without class*/
if(relatFeat){
if(!(numOfOrigAttr==trainFoldRE.numAttributes()-1)){ //if we don't get any feature from Relational then we skip merge with relational feat
remove.setAttributeIndices((numOfOrigAttr+1)+"-"+(trainFoldRE.numAttributes()-1));
remove.setInvertSelection(true);
remove.setInputFormat(trainFoldRE);
trainFoldRE = Filter.useFilter(trainFoldRE, remove);
remove.setAttributeIndices((numOfOrigAttr+1)+"-"+(testFoldRE.numAttributes()-1));
remove.setInvertSelection(true);
remove.setInputFormat(testFoldRE);
testFoldRE = Filter.useFilter(testFoldRE, remove);
/*merge before constructed features and relational features*/
trainFold=Instances.mergeInstances(trainFold,trainFoldRE);
testFold=Instances.mergeInstances(testFold,testFoldRE);
}
}
/*get all constructed Cartesian product features, without class*/
if(cartFeat){
if(!(numOfOrigAttr==trainFoldCP.numAttributes()-1)){ //if we don't get any feature from CP then we skip merge with Cartesian
remove.setAttributeIndices((numOfOrigAttr+1)+"-"+(trainFoldCP.numAttributes()-1));
remove.setInvertSelection(true);
remove.setInputFormat(trainFoldCP);
trainFoldCP = Filter.useFilter(trainFoldCP, remove);
remove.setAttributeIndices((numOfOrigAttr+1)+"-"+(testFoldCP.numAttributes()-1));
remove.setInvertSelection(true);
remove.setInputFormat(testFoldCP);
testFoldCP = Filter.useFilter(testFoldCP, remove);
/*merge before constructed features and Cartesian product features*/
trainFold=Instances.mergeInstances(trainFold,trainFoldCP);
testFold=Instances.mergeInstances(testFold,testFoldCP);
}
}
/*get all constructed decision rules and threshold features, without class*/
if(decRuleFeat || thrFeat){
if(!(numOfOrigAttr==trainFoldFU.numAttributes()-1)){ //if we don't get any features from FURIA/threshold construction then we skip this merge
remove.setAttributeIndices((numOfOrigAttr+1)+"-"+(trainFoldFU.numAttributes()-1));
remove.setInvertSelection(true);
remove.setInputFormat(trainFoldFU);
trainFoldFU = Filter.useFilter(trainFoldFU, remove);
remove.setAttributeIndices((numOfOrigAttr+1)+"-"+(testFoldFU.numAttributes()-1));
remove.setInvertSelection(true);
remove.setInputFormat(testFoldFU);
testFoldFU = Filter.useFilter(testFoldFU, remove);
/*merge before constructed features and decision rule and/or threshold features*/
trainFold=Instances.mergeInstances(trainFold,trainFoldFU);
testFold=Instances.mergeInstances(testFold,testFoldFU);
}
}
//add class to constructed features
remove.setAttributeIndices("last"); //we need class attribute
remove.setInvertSelection(true);
remove.setInputFormat(data);
Instances trainClassAttr = Filter.useFilter(data, remove);
remove.setAttributeIndices("last"); //we need class attribute
remove.setInvertSelection(true);
remove.setInputFormat(test);
Instances testClassAttr = Filter.useFilter(test, remove);
trainFold=Instances.mergeInstances(trainFold,trainClassAttr);
testFold=Instances.mergeInstances(testFold,testClassAttr);
//set class index
trainFold.setClassIndex(trainFold.numAttributes()-1);
testFold.setClassIndex(testFold.numAttributes()-1);
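//total FC time for this fold is the sum of the per-type construction times; the numerical term is included only
//when numerical features were constructed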
allFCTime[f]=numerFeat ? (allFCTimeLF[f]+numericalFCTime[f]+relationalFCTime[f]+cartesianFCTime[f]+furiaThrTime[f]) : (allFCTimeLF[f]+relationalFCTime[f]+cartesianFCTime[f]+furiaThrTime[f]);
if(exhaustive){
System.out.println("FC time (exhaustive search): "+allFCTime[f]+" All features method, fold: "+(f+1));
logFile.println("FC time (exhaustive search): "+allFCTime[f]+" All features method, fold: "+(f+1));
}
if(!jakulin){
attrImpListMDL.println("Feature evaluation: MDL (All features dataset) - After CI");
mdlCORElearn(trainFold, rCaller, code);
attrImpListReliefF.println("Feature evaluation: ReliefF (All features dataset) - After CI");
reliefFcalcDistanceOnAttributes(data, trainFold);
}
tmp = numOfFeat(trainFold, data.numAttributes()-1);
numberOfFeatByFolds[0][f]=tmp[0]; //logical
numberOfFeatByFolds[1][f]=tmp[1]; //threshold
numberOfFeatByFolds[2][f]=tmp[2]; //decision rule (FURIA)
numberOfFeatByFolds[3][f]=tmp[3]; //Cartesian
numberOfFeatByFolds[4][f]=tmp[4]; //relational
numberOfFeatByFolds[5][f]=tmp[5]; //numerical
for(int c=0;c<clsTab.length;c++){
model=clsTab[c];
t1.start();
ma=evaluateModel(trainFold,testFold,model);
t1.stop();
accuracyByFolds[c][f]=ma.getAcc();
learnAllFCTime[c][f]=t1.diff();
model=ma.getClassifier();
if(excludeUppers(model.getClass().getSimpleName()).equals("J48")){
j48=(J48)(model);
numberOfTreeByFolds[0][f]=(int)j48.measureTreeSize(); //treeSize
numberOfTreeByFolds[1][f]=(int)j48.measureNumLeaves(); //numOfLeaves
numberOfTreeByFolds[2][f]=sumOfTermsInConstrInTree(trainFold, data.numAttributes()-1, j48); //sumOfTerms
numberOfTreeByFolds[3][f]=(numberOfTreeByFolds[0][f]-numberOfTreeByFolds[1][f])==0 ? 0 : numberOfTreeByFolds[2][f]/(numberOfTreeByFolds[0][f]-numberOfTreeByFolds[1][f]); //sum of terms of constr DIV num of nodes
numOfLogInTreeAll[f]=numOfLogFeatInTree(trainFold, data.numAttributes()-1, j48);
sumOfConstrLFAll[f]=sumOfLFTermsInConstrInTree(trainFold, data.numAttributes()-1, j48);
nC=numOfNumFeatInTree(trainFold, data.numAttributes()-1, j48);
numOfNumInTreeAll[f]=nC[0];
sumOfConstrNumAll[f]=nC[1];
nC=numOfRelFeatInTree(trainFold, data.numAttributes()-1, j48);
numOfRelInTreeAll[f]=nC[0]; //number of relational feat in tree
sumOfConstrRelAll[f]=nC[1]; //sum of constructs (relational features) in tree
nC=numOfCartFeatInTree(trainFold, data.numAttributes()-1, j48);
numOfCartFAll[f]=nC[0]; //number of Cartesian features in tree
sumOfConstrCartAll[f]=nC[1]; //sum of constructs (Cartesian features) in tree
int furiaThrC[]=numOfDrThrFeatInTree(trainFold, data.numAttributes()-1, j48);
//if there are no constructs we keep the 0 from the initialized arrays - we don't take values from the original dataset because it contains no constructs
numOfFuriaThrInTreeByFolds[0][f]=furiaThrC[0];
numOfFuriaThrInTreeByFolds[1][f]=furiaThrC[1];
numOfFuriaThrInTreeByFolds[2][f]=furiaThrC[2];
numOfFuriaThrInTreeByFolds[3][f]=furiaThrC[3];
}
if(excludeUppers(model.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=(FURIA)(model);
numOfRulesByFolds[f]=fu.getRuleset().size();
numOfTermsByFoldsF[f]=sumOfTermsInConstrInRule(fu.getRuleset(),trainFold);
numOfRatioByFoldsF[f]=numOfRulesByFolds[f]==0 ? 0 : (numOfTermsByFoldsF[f]/numOfRulesByFolds[f]);
}
}
if(!exhaustive){
if(f==0){
String pctSplitT;
switch(splitTrain){ //5 ... 80%:20%, 4 ... 75%:25%, 3 ... 66%:33%
case 5: pctSplitT="Percentage of split (train:test) 80%:20%"; break;
case 4: pctSplitT="Percentage of split (train:test) 75%:25%"; break;
case 3: pctSplitT="Percentage of split (train:test) 66%:33%"; break;
default: pctSplitT="Invalid split value!";
}
logFile.println("Testing method: "+((folds>1) ? "CV, number of folds: "+folds : pctSplitT));
bestParamPerFold.println("Testing method: "+((folds>1) ? "CV, number of folds: "+folds : pctSplitT));
String pctSplitTFS;
switch(splitTrainFS){ //5 ... 80%:20%, 4 ... 75%:25%, 3 ... 66%:33%
case 5: pctSplitTFS="Percentage of split (subTrain:validation) 80%:20%"; break;
case 4: pctSplitTFS="Percentage of split (subTrain:validation) 75%:25%"; break;
case 3: pctSplitTFS="Percentage of split (subTrain:validation) 66%:33%"; break;
default: pctSplitTFS="Invalid split value!";
}
logFile.println("Feature selection on validation dataset. "+pctSplitTFS);
bestParamPerFold.println("Feature selection on validation dataset. "+pctSplitTFS);
}
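//for each classifier, paramSearch runs feature selection on a subTrain/validation split of the train fold and
//returns the test accuracy, selected-feature counts per feature type, timings and model-complexity statistics
//for this fold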
for(int i=0;i<clsTab.length;i++){
ParamSearchEval pse;
pse=paramSearch(trainFold, testFold, clsTab[i],data.numAttributes()-1,splitTrainFS, rCaller, code);
accuracyByFoldsPS[i][f]=pse.getAcc();
//we need info for different classifiers
featByFoldsPS[0][f][i]=pse.getFeat()[0]; //logical
featByFoldsPS[1][f][i]=pse.getFeat()[1]; //threshold
featByFoldsPS[2][f][i]=pse.getFeat()[2]; //decision rule (FURIA)
featByFoldsPS[3][f][i]=pse.getFeat()[3]; //Cartesian
featByFoldsPS[4][f][i]=pse.getFeat()[4]; //relational
featByFoldsPS[5][f][i]=pse.getFeat()[5]; //numerical
paramSearchTime[i][f]=pse.getTime()[0];
paramSLearnT[i][f]=pse.getTime()[1];
if(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("J48")){ //0-tree size, 1-number of leaves, 2-sum of constructs
numberOfTreeByFoldsPS[0][f]=pse.getTree()[0];
numberOfTreeByFoldsPS[1][f]=pse.getTree()[1];
numberOfTreeByFoldsPS[2][f]=pse.getTree()[2];
numberOfTreeByFoldsPS[3][f]=(numberOfTreeByFoldsPS[0][f]-numberOfTreeByFoldsPS[1][f])==0 ? 0 : numberOfTreeByFoldsPS[2][f]/(numberOfTreeByFoldsPS[0][f]-numberOfTreeByFoldsPS[1][f]); //guard against division by zero, as in the other per-fold computations
numLogFeatInTreeFS[0][f]=pse.getNumLogFeatInTree()[0];
numLogFeatInTreeFS[1][f]=pse.getNumLogFeatInTree()[1];
if(numerFeat){
numNumFeatInTreeFS[0][f]=pse.getNumFeatInTree()[0];
numNumFeatInTreeFS[1][f]=pse.getNumFeatInTree()[1];
}
numRelFeatInTreeFS[0][f]=pse.getRelFeatInTree()[0];
numRelFeatInTreeFS[1][f]=pse.getRelFeatInTree()[1];
numCartFeatInTreeFS[0][f]=pse.getCartFeatInTree()[0];
numCartFeatInTreeFS[1][f]=pse.getCartFeatInTree()[1];
numOfFuriaThrInTreeByFoldsP[0][f]=pse.getFuriaThrComplx()[0];
numOfFuriaThrInTreeByFoldsP[1][f]=pse.getFuriaThrComplx()[1];
numOfFuriaThrInTreeByFoldsP[2][f]=pse.getFuriaThrComplx()[2];
numOfFuriaThrInTreeByFoldsP[3][f]=pse.getFuriaThrComplx()[3];
}
if(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")){
complexityOfFuriaPS[0][f]=pse.getComplexityFuria()[0];
complexityOfFuriaPS[1][f]=pse.getComplexityFuria()[1];
complexityOfFuriaPS[2][f]=complexityOfFuriaPS[0][f]==0 ? 0 : (complexityOfFuriaPS[1][f]/complexityOfFuriaPS[0][f]);
}
}
}
} //Jakulin's method or EFC
} //end FOR loop for folds
DecimalFormat df = new DecimalFormat("0.00");
/**************** Write ACC by folds into files ******************/
if(writeAccByFoldsInFile && (!visualisation && !justExplain && !exhaustive && !jakulin)){
for(int i=0;i<clsTab.length;i++){
accByFolds=new PrintWriter(new FileWriter(folderName+clsTab[i].getClass().getSimpleName()+"-byFolds-"+lg+".csv"));
if(numerFeat)
accByFolds.println("FoldNo;Base;Log;Num;Rel;Cart;DrThr;All;FS");
else
accByFolds.println("FoldNo;Base;Log;Rel;Cart;DrThr;All;FS");
for(int f=0;f<folds;f++){
if(numerFeat)
accByFolds.println("Fold"+(f+1)+";"+df.format(accOrigModelByFolds[i][f])+";"+df.format(accByFoldsLF[i][f])+";"+ df.format(accByFoldsNum[i][f])+";"+df.format(accByFoldsRE[i][f])+";"+df.format(accByFoldsCP[i][f])+";"+df.format(accuracyByFoldsFuriaThr[i][f])+";"+df.format(accuracyByFolds[i][f])+";"+df.format(accuracyByFoldsPS[i][f]));
else
accByFolds.println("Fold"+(f+1)+";"+df.format(accOrigModelByFolds[i][f])+";"+df.format(accByFoldsLF[i][f])+";"+df.format(accByFoldsRE[i][f])+";"+df.format(accByFoldsCP[i][f])+";"+df.format(accuracyByFoldsFuriaThr[i][f])+";"+df.format(accuracyByFolds[i][f])+";"+df.format(accuracyByFoldsPS[i][f]));
}
accByFolds.close();
}
}
/****************************************************************/
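//summary reporting: every statistic below is a mean across folds with its standard deviation computed as
//sqrt(var(x, mean(x))); all lines are mirrored to standard output and to logFile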
if(!jakulin){
if(!exhaustive){
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
System.out.println("Avg. explanation time: "+df.format(mean(exlpTime))+" [ms] (stdev "+ df.format(Math.sqrt(var(exlpTime,mean(exlpTime))))+")");
logFile.println("Avg. explanation time: "+df.format(mean(exlpTime))+" [ms] (stdev "+ df.format(Math.sqrt(var(exlpTime,mean(exlpTime))))+")");
System.out.println("Avg. number of instances that we explain: "+df.format(mean(numOfExplainedInst))+" (stdev "+df.format(Math.sqrt(var(numOfExplainedInst,mean(numOfExplainedInst))))+")");
logFile.println("Avg. number of instances that we explain: "+df.format(mean(numOfExplainedInst))+" (stdev "+df.format(Math.sqrt(var(numOfExplainedInst,mean(numOfExplainedInst))))+")");
}
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
if(!exhaustive){
if(treeSHAP){
System.out.println("Internal (during building) accuracy of explanation model: "+df.format(mean(accExplAlgInt))+" (stdev "+df.format(Math.sqrt(var(accExplAlgInt,mean(accExplAlgInt))))+") ACC on the test dataset: "+df.format(mean(accExplAlgTest))+" (stdev "+df.format(Math.sqrt(var(accExplAlgTest,mean(accExplAlgTest))))+")");
logFile.println("Internal (during building) accuracy of explanation model: "+df.format(mean(accExplAlgInt))+" (stdev "+df.format(Math.sqrt(var(accExplAlgInt,mean(accExplAlgInt))))+") ACC on the test dataset: "+df.format(mean(accExplAlgTest))+" (stdev "+df.format(Math.sqrt(var(accExplAlgTest,mean(accExplAlgTest))))+")");
}
else{
System.out.println("ACC on the test dataset: "+df.format(mean(accExplAlgTest))+" stdev "+df.format(Math.sqrt(var(accExplAlgTest,mean(accExplAlgTest))))+(excludeUppers(predictionModel.getClass().getSimpleName()).equals("RF")?" ACC RF OOB: "+df.format(mean(oobRF))+" stdev "+df.format(Math.sqrt(var(oobRF,mean(oobRF)))):""));
logFile.println("ACC on the test dataset: "+df.format(mean(accExplAlgTest))+" stdev "+df.format(Math.sqrt(var(accExplAlgTest,mean(accExplAlgTest))))+(excludeUppers(predictionModel.getClass().getSimpleName()).equals("RF")?" ACC RF OOB: "+df.format(mean(oobRF))+" stdev "+df.format(Math.sqrt(var(oobRF,mean(oobRF)))):""));
}
}
if(!exhaustive){
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
System.out.println("Avg. model building time: "+df.format(mean(modelBuildTime))+" [ms] (stdev "+ df.format(Math.sqrt(var(modelBuildTime,mean(modelBuildTime))))+")");
logFile.println("Avg. model building time: "+df.format(mean(modelBuildTime))+" [ms] (stdev "+ df.format(Math.sqrt(var(modelBuildTime,mean(modelBuildTime))))+")");
}
if(!jakulin){
System.out.println("---------------------------------------------------------------------------------");
logFile.println("---------------------------------------------------------------------------------");
System.out.println("Feature types included in construction: " + (logFeat ? "logical operators based, " : "") + (decRuleFeat ? "decision rule, " : "") + (thrFeat ? "threshold, " : "") + (relatFeat ? "relational, " : "") + (cartFeat ? "Cartesian product, " : "")+(numerFeat ? "numerical" : ""));
logFile.println("Feature types included in construction: " + (logFeat ? "logical operators based, " : "") + (decRuleFeat ? "decision rule, " : "") + (thrFeat ? "threshold, " : "") + (relatFeat ? "relational, " : "") + (cartFeat ? "Cartesian product, " : "")+(numerFeat ? "numerical" : ""));
}
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Original dataset");
logFile.println("Original dataset");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Avg. tree size: "+df.format(mean(treeSize))+" (stdev "+df.format(Math.sqrt(var(treeSize,mean(treeSize))))+")"+" avg. number of leaves: "+df.format(mean(numOfLeaves))+" (stdev "+df.format(Math.sqrt(var(numOfLeaves,mean(numOfLeaves))))+")"+" avg. number of nodes: "+df.format(mean(sumOfTerms))+" (stdev "+df.format(Math.sqrt(var(sumOfTerms,mean(sumOfTerms))))+")");
logFile.println("Avg. tree size: "+df.format(mean(treeSize))+" (stdev "+df.format(Math.sqrt(var(treeSize,mean(treeSize))))+")"+" avg. number of leaves: "+df.format(mean(numOfLeaves))+" (stdev "+df.format(Math.sqrt(var(numOfLeaves,mean(numOfLeaves))))+")"+" avg. number of nodes: "+df.format(mean(sumOfTerms))+" (stdev "+df.format(Math.sqrt(var(sumOfTerms,mean(sumOfTerms))))+")");
System.out.println("Avg. ruleset size: "+df.format(mean(numOfRules))+" (stdev "+df.format(Math.sqrt(var(numOfRules,mean(numOfRules))))+")"+" avg. number of attributes in rules: "+ df.format(mean(numOfTerms))+" (stdev "+df.format(Math.sqrt(var(numOfTerms,mean(numOfTerms))))+")"+" avg. number of attributes per rule: "+df.format(mean(numConstructsPerRule)) +" (stdev "+df.format(Math.sqrt(var(numConstructsPerRule,mean(numConstructsPerRule))))+")");
logFile.println("Avg. ruleset size: "+df.format(mean(numOfRules))+" (stdev "+df.format(Math.sqrt(var(numOfRules,mean(numOfRules))))+")"+" avg. number of attributes in rules: "+ df.format(mean(numOfTerms))+" (stdev "+df.format(Math.sqrt(var(numOfTerms,mean(numOfTerms))))+") avg. number of attributes per rule: "+df.format(mean(numConstructsPerRule)) +" (stdev "+df.format(Math.sqrt(var(numConstructsPerRule,mean(numConstructsPerRule))))+")");
System.out.println("-----ACC-----");
logFile.println("-----ACC-----");
for (int i=0;i<clsTab.length;i++){
System.out.println("Avg. class. ACC "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(accOrigModelByFolds[i]))+" (stdev "+ df.format(Math.sqrt(var(accOrigModelByFolds[i],mean(accOrigModelByFolds[i]))))+")");
logFile.println("Avg. class. ACC "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(accOrigModelByFolds[i]))+" (stdev "+ df.format(Math.sqrt(var(accOrigModelByFolds[i],mean(accOrigModelByFolds[i]))))+")");
}
System.out.println("-----Learning and testing time-----");
logFile.println("-----Learning and testing time-----");
for (int i=0;i<clsTab.length;i++){
System.out.println("Avg. learning time "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllTime[i]))+" [ms] (stdev "+df.format(Math.sqrt(var(learnAllTime[i],mean(learnAllTime[i]))))+" [ms])");
logFile.println("Avg. learning time "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllTime[i]))+" [ms] (stdev "+df.format(Math.sqrt(var(learnAllTime[i],mean(learnAllTime[i]))))+" [ms])");
}
}
if(!jakulin){
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Only Logical features");
logFile.println("Only Logical features");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Number of logical feat: "+df.format(mean(numOfFeatByFoldsLF))+" (stdev "+ df.format(Math.sqrt(var(numOfFeatByFoldsLF,mean(numOfFeatByFoldsLF))))+")");
logFile.println("Number of logical feat: "+df.format(mean(numOfFeatByFoldsLF))+" (stdev "+ df.format(Math.sqrt(var(numOfFeatByFoldsLF,mean(numOfFeatByFoldsLF))))+")");
System.out.println("Avg. tree size (nodes+leaves): "+df.format(mean(numOfTreeByFoldsLF[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsLF[0],mean(numOfTreeByFoldsLF[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numOfTreeByFoldsLF[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsLF[1],mean(numOfTreeByFoldsLF[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numOfTreeByFoldsLF[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsLF[2],mean(numOfTreeByFoldsLF[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numOfTreeByFoldsLF[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsLF[3],mean(numOfTreeByFoldsLF[3]))))+")");
logFile.println("Avg. tree size (nodes+leaves): "+df.format(mean(numOfTreeByFoldsLF[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsLF[0],mean(numOfTreeByFoldsLF[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numOfTreeByFoldsLF[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsLF[1],mean(numOfTreeByFoldsLF[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numOfTreeByFoldsLF[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsLF[2],mean(numOfTreeByFoldsLF[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numOfTreeByFoldsLF[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsLF[3],mean(numOfTreeByFoldsLF[3]))))+")");
System.out.println("Avg. num of logical feat in tree: "+df.format(mean(numOfLogicalInTree[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfLogicalInTree[0],mean(numOfLogicalInTree[0]))))+")"+" avg. sum of (logical) constructs: "+df.format(mean(numOfLogicalInTree[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfLogicalInTree[1],mean(numOfLogicalInTree[1]))))+")");
logFile.println("Avg. num of logical feat in tree: "+df.format(mean(numOfLogicalInTree[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfLogicalInTree[0],mean(numOfLogicalInTree[0]))))+")"+" avg. sum of (logical) constructs: "+df.format(mean(numOfLogicalInTree[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfLogicalInTree[1],mean(numOfLogicalInTree[1]))))+")");
System.out.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFoldsLF))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFoldsLF,mean(numOfRulesByFoldsLF))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsLF))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsLF,mean(numOfTermsByFoldsLF))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsLF)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsLF,mean(numOfRatioByFoldsLF))))+")");
logFile.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFoldsLF))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFoldsLF,mean(numOfRulesByFoldsLF))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsLF))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsLF,mean(numOfTermsByFoldsLF))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsLF)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsLF,mean(numOfRatioByFoldsLF))))+")");
System.out.println("-----ACC-----");
logFile.println("-----ACC-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accByFoldsLF[c]))+" (stdev "+ df.format(Math.sqrt(var(accByFoldsLF[c],mean(accByFoldsLF[c]))))+")");
logFile.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accByFoldsLF[c]))+" (stdev "+ df.format(Math.sqrt(var(accByFoldsLF[c],mean(accByFoldsLF[c]))))+")");
}
System.out.println("-----Learning and testing time-----");
logFile.println("-----Learning and testing time-----");
for (int i=0;i<clsTab.length;i++){
System.out.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTimeLF[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTimeLF[i],mean(learnAllFCTimeLF[i]))))+")");
logFile.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTimeLF[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTimeLF[i],mean(learnAllFCTimeLF[i]))))+")");
}
System.out.println("-----Feature construction time-----");
logFile.println("-----Feature construction time-----");
System.out.println("Avg. FC time (all feat): "+df.format(mean(allFCTimeLF))+" [ms] stdev "+ df.format(Math.sqrt(var(allFCTimeLF,mean(allFCTimeLF)))));
logFile.println("Avg. FC time (all feat): "+df.format(mean(allFCTimeLF))+" [ms] stdev "+ df.format(Math.sqrt(var(allFCTimeLF,mean(allFCTimeLF)))));
}
if(!jakulin && numerFeat){
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Numerical features");
logFile.println("Numerical features");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Number of numerical feat: "+df.format(mean(numFeatByFoldsNum))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsNum,mean(numFeatByFoldsNum))))+")");
logFile.println("Number of numerical feat: "+df.format(mean(numFeatByFoldsNum))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsNum,mean(numFeatByFoldsNum))))+")");
System.out.println("Avg. tree size (nodes+leaves): "+df.format(mean(numOfTreeByFoldsNum[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsNum[0],mean(numOfTreeByFoldsNum[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numOfTreeByFoldsNum[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsNum[1],mean(numOfTreeByFoldsNum[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numOfTreeByFoldsNum[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsNum[2],mean(numOfTreeByFoldsNum[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numOfTreeByFoldsNum[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsNum[3],mean(numOfTreeByFoldsNum[3]))))+")");
logFile.println("Avg. tree size (nodes+leaves): "+df.format(mean(numOfTreeByFoldsNum[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsNum[0],mean(numOfTreeByFoldsNum[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numOfTreeByFoldsNum[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsNum[1],mean(numOfTreeByFoldsNum[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numOfTreeByFoldsNum[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsNum[2],mean(numOfTreeByFoldsNum[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numOfTreeByFoldsNum[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsNum[3],mean(numOfTreeByFoldsNum[3]))))+")");
System.out.println("Avg. num of numerical feat in tree: "+df.format(mean(numOfNumerical))+" (stdev "+ df.format(Math.sqrt(var(numOfNumerical,mean(numOfNumerical))))+")"+" avg. sum of (only numerical) constructs (in tree): "+ df.format(mean(sumOfConstrNum))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrNum,mean(sumOfConstrNum))))+")");
logFile.println("Avg. num of numerical feat in tree: "+df.format(mean(numOfNumerical))+" (stdev "+ df.format(Math.sqrt(var(numOfNumerical,mean(numOfNumerical))))+")"+" avg. sum of (only numerical) constructs (in tree): "+ df.format(mean(sumOfConstrNum))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrNum,mean(sumOfConstrNum))))+")");
System.out.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFoldsNum))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFoldsNum,mean(numOfRulesByFoldsNum))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsNum))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsNum,mean(numOfTermsByFoldsNum))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsNum)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsNum,mean(numOfRatioByFoldsNum))))+")");
logFile.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFoldsNum))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFoldsNum,mean(numOfRulesByFoldsNum))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsNum))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsNum,mean(numOfTermsByFoldsNum))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsNum)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsNum,mean(numOfRatioByFoldsNum))))+")");
System.out.println("-----ACC-----");
logFile.println("-----ACC-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accByFoldsNum[c]))+" (stdev "+ df.format(Math.sqrt(var(accByFoldsNum[c],mean(accByFoldsNum[c]))))+")");
logFile.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accByFoldsNum[c]))+" (stdev "+ df.format(Math.sqrt(var(accByFoldsNum[c],mean(accByFoldsNum[c]))))+")");
}
System.out.println("-----Learning and testing time-----");
logFile.println("-----Learning and testing time-----");
for (int i=0;i<clsTab.length;i++){
System.out.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTimeNum[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTimeNum[i],mean(learnAllFCTimeNum[i]))))+")");
logFile.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTimeNum[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTimeNum[i],mean(learnAllFCTimeNum[i]))))+")");
}
System.out.println("-----Feature construction time-----");
logFile.println("-----Feature construction time-----");
System.out.println("Avg. FC time (all feat): "+df.format(mean(numericalFCTime))+" [ms] stdev "+ df.format(Math.sqrt(var(numericalFCTime,mean(numericalFCTime)))));
logFile.println("Avg. FC time (all feat): "+df.format(mean(numericalFCTime))+" [ms] stdev "+ df.format(Math.sqrt(var(numericalFCTime,mean(numericalFCTime)))));
}
if(!jakulin){
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Relational features");
logFile.println("Relational features");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Number of relational feat: "+df.format(mean(numFeatByFoldsRE))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsRE,mean(numFeatByFoldsRE))))+")");
logFile.println("Number of relational feat: "+df.format(mean(numFeatByFoldsRE))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsRE,mean(numFeatByFoldsRE))))+")");
System.out.println("Avg. tree size (nodes+leaves): "+df.format(mean(numOfTreeByFoldsRE[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsRE[0],mean(numOfTreeByFoldsRE[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numOfTreeByFoldsRE[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsRE[1],mean(numOfTreeByFoldsRE[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numOfTreeByFoldsRE[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsRE[2],mean(numOfTreeByFoldsRE[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numOfTreeByFoldsRE[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsRE[3],mean(numOfTreeByFoldsRE[3]))))+")");
logFile.println("Avg. tree size (nodes+leaves): "+df.format(mean(numOfTreeByFoldsRE[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsRE[0],mean(numOfTreeByFoldsRE[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numOfTreeByFoldsRE[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsRE[1],mean(numOfTreeByFoldsRE[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numOfTreeByFoldsRE[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsRE[2],mean(numOfTreeByFoldsRE[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numOfTreeByFoldsRE[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsRE[3],mean(numOfTreeByFoldsRE[3]))))+")");
System.out.println("Avg. num of relational feat in tree: "+df.format(mean(numOfRelational))+" (stdev "+ df.format(Math.sqrt(var(numOfRelational,mean(numOfRelational))))+")"+" avg. sum of (only Cartesian) constructs (in tree): "+ df.format(mean(sumOfConstrRel))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrRel,mean(sumOfConstrRel))))+")");
logFile.println("Avg. num of relational feat in tree: "+df.format(mean(numOfRelational))+" (stdev "+ df.format(Math.sqrt(var(numOfRelational,mean(numOfRelational))))+")"+" avg. sum of (only Cartesian) constructs (in tree): "+ df.format(mean(sumOfConstrRel))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrRel,mean(sumOfConstrRel))))+")");
System.out.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFoldsRE))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFoldsRE,mean(numOfRulesByFoldsRE))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsRE))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsRE,mean(numOfTermsByFoldsRE))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsRE)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsRE,mean(numOfRatioByFoldsRE))))+")");
logFile.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFoldsRE))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFoldsRE,mean(numOfRulesByFoldsRE))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsRE))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsRE,mean(numOfTermsByFoldsRE))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsRE)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsRE,mean(numOfRatioByFoldsRE))))+")");
System.out.println("-----ACC-----");
logFile.println("-----ACC-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accByFoldsRE[c]))+" (stdev "+ df.format(Math.sqrt(var(accByFoldsRE[c],mean(accByFoldsRE[c]))))+")");
logFile.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accByFoldsRE[c]))+" (stdev "+ df.format(Math.sqrt(var(accByFoldsRE[c],mean(accByFoldsRE[c]))))+")");
}
System.out.println("-----Learning and testing time-----");
logFile.println("-----Learning and testing time-----");
for (int i=0;i<clsTab.length;i++){
System.out.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTimeRE[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTimeRE[i],mean(learnAllFCTimeRE[i]))))+")");
logFile.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTimeRE[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTimeRE[i],mean(learnAllFCTimeRE[i]))))+")");
}
System.out.println("-----Feature construction time-----");
logFile.println("-----Feature construction time-----");
System.out.println("Avg. FC time (all feat): "+df.format(mean(relationalFCTime))+" [ms] stdev "+ df.format(Math.sqrt(var(relationalFCTime,mean(relationalFCTime)))));
logFile.println("Avg. FC time (all feat): "+df.format(mean(relationalFCTime))+" [ms] stdev "+ df.format(Math.sqrt(var(relationalFCTime,mean(relationalFCTime)))));
}
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
if(!jakulin){
System.out.println("Cartesian product");
logFile.println("Cartesian product");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
}
else{
System.out.println("Jakulin's interaction information");
logFile.println("Jakulin's interaction information");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
}
System.out.println("Number of \"Cartesian\" feat: "+df.format(mean(numFeatByFoldsCP))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsCP,mean(numFeatByFoldsCP))))+")");
logFile.println("Number of \"Cartesian\" feat: "+df.format(mean(numFeatByFoldsCP))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsCP,mean(numFeatByFoldsCP))))+")");
System.out.println("Avg. tree size (nodes+leaves): "+df.format(mean(numOfTreeByFoldsCP[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsCP[0],mean(numOfTreeByFoldsCP[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numOfTreeByFoldsCP[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsCP[1],mean(numOfTreeByFoldsCP[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numOfTreeByFoldsCP[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsCP[2],mean(numOfTreeByFoldsCP[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numOfTreeByFoldsCP[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsCP[3],mean(numOfTreeByFoldsCP[3]))))+")");
logFile.println("Avg. tree size (nodes+leaves): "+df.format(mean(numOfTreeByFoldsCP[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsCP[0],mean(numOfTreeByFoldsCP[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numOfTreeByFoldsCP[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsCP[1],mean(numOfTreeByFoldsCP[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numOfTreeByFoldsCP[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsCP[2],mean(numOfTreeByFoldsCP[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numOfTreeByFoldsCP[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfTreeByFoldsCP[3],mean(numOfTreeByFoldsCP[3]))))+")");
System.out.println("Avg. num of \"Cartesian\" feat in tree: "+df.format(mean(numOfCartesian))+" (stdev "+ df.format(Math.sqrt(var(numOfCartesian,mean(numOfCartesian))))+")"+" avg. sum of (only Cartesian) constructs (in tree): "+ df.format(mean(sumOfConstrCart))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrCart,mean(sumOfConstrCart))))+")");
logFile.println("Avg. num of \"Cartesian\" feat in tree: "+df.format(mean(numOfCartesian))+" (stdev "+ df.format(Math.sqrt(var(numOfCartesian,mean(numOfCartesian))))+")"+" avg. sum of (only Cartesian) constructs (in tree): "+ df.format(mean(sumOfConstrCart))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrCart,mean(sumOfConstrCart))))+")");
System.out.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFoldsCP))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFoldsCP,mean(numOfRulesByFoldsCP))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsCP))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsCP,mean(numOfTermsByFoldsCP))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsCP)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsCP,mean(numOfRatioByFoldsCP))))+")");
logFile.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFoldsCP))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFoldsCP,mean(numOfRulesByFoldsCP))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsCP))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsCP,mean(numOfTermsByFoldsCP))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsCP)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsCP,mean(numOfRatioByFoldsCP))))+")");
System.out.println("-----ACC-----");
logFile.println("-----ACC-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accByFoldsCP[c]))+" (stdev "+ df.format(Math.sqrt(var(accByFoldsCP[c],mean(accByFoldsCP[c]))))+")");
logFile.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accByFoldsCP[c]))+" (stdev "+ df.format(Math.sqrt(var(accByFoldsCP[c],mean(accByFoldsCP[c]))))+")");
}
System.out.println("-----Learning and testing time-----");
logFile.println("-----Learning and testing time-----");
for (int i=0;i<clsTab.length;i++){
System.out.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTimeCP[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTimeCP[i],mean(learnAllFCTimeCP[i]))))+")");
logFile.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTimeCP[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTimeCP[i],mean(learnAllFCTimeCP[i]))))+")");
}
System.out.println("-----Feature construction time-----");
logFile.println("-----Feature construction time-----");
System.out.println("Avg. FC time (all feat): "+df.format(mean(cartesianFCTime))+" [ms] stdev "+ df.format(Math.sqrt(var(cartesianFCTime,mean(cartesianFCTime)))));
logFile.println("Avg. FC time (all feat): "+df.format(mean(cartesianFCTime))+" [ms] stdev "+ df.format(Math.sqrt(var(cartesianFCTime,mean(cartesianFCTime)))));
if(!jakulin){
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Only Furia and THR feat");
logFile.println("Only Furia and THR feat");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Number of FURIA feat: "+ df.format(mean(numFeatByFoldsFuriaThr[1]))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsFuriaThr[1],mean(numFeatByFoldsFuriaThr[1]))))+")"+" number of thr. feat: "+ df.format(mean(numFeatByFoldsFuriaThr[0]))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsFuriaThr[0],mean(numFeatByFoldsFuriaThr[0]))))+")");
logFile.println("Number of FURIA feat: "+ df.format(mean(numFeatByFoldsFuriaThr[1]))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsFuriaThr[1],mean(numFeatByFoldsFuriaThr[1]))))+")"+" number of thr. feat: "+ df.format(mean(numFeatByFoldsFuriaThr[0]))+" (stdev "+ df.format(Math.sqrt(var(numFeatByFoldsFuriaThr[0],mean(numFeatByFoldsFuriaThr[0]))))+")");
System.out.println("Avg. tree size (nodes+leaves): "+df.format(mean(numTreeByFoldsFuriaThr[0]))+" (stdev "+ df.format(Math.sqrt(var(numTreeByFoldsFuriaThr[0],mean(numTreeByFoldsFuriaThr[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numTreeByFoldsFuriaThr[1]))+" (stdev "+ df.format(Math.sqrt(var(numTreeByFoldsFuriaThr[1],mean(numTreeByFoldsFuriaThr[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numTreeByFoldsFuriaThr[2]))+" (stdev "+ df.format(Math.sqrt(var(numTreeByFoldsFuriaThr[2],mean(numTreeByFoldsFuriaThr[2]))))+") avg. sum of constructs / num of nodes: "+df.format(mean(numTreeByFoldsFuriaThr[3]))+" (stdev "+ df.format(Math.sqrt(var(numTreeByFoldsFuriaThr[3],mean(numTreeByFoldsFuriaThr[3]))))+")");
logFile.println("Avg. tree size (nodes+leaves): "+df.format(mean(numTreeByFoldsFuriaThr[0]))+" (stdev "+ df.format(Math.sqrt(var(numTreeByFoldsFuriaThr[0],mean(numTreeByFoldsFuriaThr[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numTreeByFoldsFuriaThr[1]))+" (stdev "+ df.format(Math.sqrt(var(numTreeByFoldsFuriaThr[1],mean(numTreeByFoldsFuriaThr[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numTreeByFoldsFuriaThr[2]))+" (stdev "+ df.format(Math.sqrt(var(numTreeByFoldsFuriaThr[2],mean(numTreeByFoldsFuriaThr[2]))))+") avg. sum of constructs / num of nodes: "+df.format(mean(numTreeByFoldsFuriaThr[3]))+" (stdev "+ df.format(Math.sqrt(var(numTreeByFoldsFuriaThr[3],mean(numTreeByFoldsFuriaThr[3]))))+")");
System.out.println("Avg. num of FURIA feat. in tree: "+df.format(mean(numOfFuriaThrInTreeByFoldsF[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsF[0],mean(numOfFuriaThrInTreeByFoldsF[0]))))+")"+" avg. sum of terms in constructs (Furia feat) in tree: "+ df.format(mean(numOfFuriaThrInTreeByFoldsF[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsF[1],mean(numOfFuriaThrInTreeByFoldsF[1]))))+")"+" avg. num of THR feat. in tree: "+ df.format(mean(numOfFuriaThrInTreeByFoldsF[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsF[2],mean(numOfFuriaThrInTreeByFoldsF[2]))))+")"+" avg. sum of terms in constructs (THR feat) in tree: "+df.format(mean(numOfFuriaThrInTreeByFoldsF[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsF[3],mean(numOfFuriaThrInTreeByFoldsF[3]))))+")");
logFile.println("Avg. num of FURIA feat. in tree: "+df.format(mean(numOfFuriaThrInTreeByFoldsF[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsF[0],mean(numOfFuriaThrInTreeByFoldsF[0]))))+")"+" avg. sum of terms in constructs (Furia feat) in tree: "+ df.format(mean(numOfFuriaThrInTreeByFoldsF[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsF[1],mean(numOfFuriaThrInTreeByFoldsF[1]))))+")"+" avg. num of THR feat. in tree: "+ df.format(mean(numOfFuriaThrInTreeByFoldsF[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsF[2],mean(numOfFuriaThrInTreeByFoldsF[2]))))+")"+" avg. sum of terms in constructs (THR feat) in tree: "+df.format(mean(numOfFuriaThrInTreeByFoldsF[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsF[3],mean(numOfFuriaThrInTreeByFoldsF[3]))))+")");
System.out.println("Avg. ruleset size: "+df.format(mean(complexityOfFuria[0]))+" (stdev "+ df.format(Math.sqrt(var(complexityOfFuria[0],mean(complexityOfFuria[0]))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(complexityOfFuria[1]))+" (stdev "+ df.format(Math.sqrt(var(complexityOfFuria[1],mean(complexityOfFuria[1]))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(complexityOfFuria[2])) +" (stdev "+df.format(Math.sqrt(var(complexityOfFuria[2],mean(complexityOfFuria[2]))))+")");
logFile.println("Avg. ruleset size: "+df.format(mean(complexityOfFuria[0]))+" (stdev "+ df.format(Math.sqrt(var(complexityOfFuria[0],mean(complexityOfFuria[0]))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(complexityOfFuria[1]))+" (stdev "+ df.format(Math.sqrt(var(complexityOfFuria[1],mean(complexityOfFuria[1]))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(complexityOfFuria[2])) +" (stdev "+df.format(Math.sqrt(var(complexityOfFuria[2],mean(complexityOfFuria[2]))))+")");
System.out.println("-----ACC-----");
logFile.println("-----ACC-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accuracyByFoldsFuriaThr[c]))+" (stdev "+ df.format(Math.sqrt(var(accuracyByFoldsFuriaThr[c],mean(accuracyByFoldsFuriaThr[c]))))+")");
logFile.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accuracyByFoldsFuriaThr[c]))+" (stdev "+ df.format(Math.sqrt(var(accuracyByFoldsFuriaThr[c],mean(accuracyByFoldsFuriaThr[c]))))+")");
}
System.out.println("-----Learning and testing time-----");
logFile.println("-----Learning and testing time-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. learning time from FC(only Furia and THR feat) "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" "+df.format(mean(learnFuriaThrTime[c]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnFuriaThrTime[c],mean(learnFuriaThrTime[c]))))+")");
logFile.println("Avg. learning time from FC(only Furia and THR feat) "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" "+df.format(mean(learnFuriaThrTime[c]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnFuriaThrTime[c],mean(learnFuriaThrTime[c]))))+")");
}
System.out.println("-----Feature construction time-----");
logFile.println("-----Feature construction time-----");
System.out.println("Avg. FC time (only Furia and THR feat): "+df.format(mean(furiaThrTime))+" [ms] stdev "+ df.format(Math.sqrt(var(furiaThrTime,mean(furiaThrTime)))));
logFile.println("Avg. FC time (only Furia and THR feat): "+df.format(mean(furiaThrTime))+" [ms] stdev "+ df.format(Math.sqrt(var(furiaThrTime,mean(furiaThrTime)))));
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("All features dataset");
logFile.println("All features dataset");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("Avg. number of groups for feature construction: "+df.format(mean(numOfGroupsOfFeatConstr))+" (stdev "+ df.format(Math.sqrt(var(numOfGroupsOfFeatConstr,mean(numOfGroupsOfFeatConstr))))+")");
logFile.println("Avg. number of groups for feature construction: "+df.format(mean(numOfGroupsOfFeatConstr))+" (stdev "+ df.format(Math.sqrt(var(numOfGroupsOfFeatConstr,mean(numOfGroupsOfFeatConstr))))+")");
System.out.println("Avg. size of groups (num. of candidate attr.) per folds (unm of attr. in all groups): "+df.format(mean(avgTermsPerFold))+" (stdev "+ df.format(Math.sqrt(var(avgTermsPerFold,mean(avgTermsPerFold))))+")");
logFile.println("Avg. size of groups (num. of candidate attr.) per folds (unm of attr. in all groups):: "+df.format(mean(avgTermsPerFold))+" (stdev "+ df.format(Math.sqrt(var(avgTermsPerFold,mean(avgTermsPerFold))))+")");
System.out.println("Avg. size of groups (num. of candidate attr.) per groups per folds (avg. of avg.): "+df.format(mean(avgTermsPerGroup))+" (stdev "+ df.format(Math.sqrt(var(avgTermsPerGroup,mean(avgTermsPerGroup))))+")");
logFile.println("Avg. size of groups (num. of candidate attr.) per groups per folds (avg. of avg.): "+df.format(mean(avgTermsPerGroup))+" (stdev "+ df.format(Math.sqrt(var(avgTermsPerGroup,mean(avgTermsPerGroup))))+")");
System.out.println("Max avg. group (length) of constructs (num of attr.): "+df.format(mean(maxGroupOfConstructs))+" (stdev "+ df.format(Math.sqrt(var(maxGroupOfConstructs,mean(maxGroupOfConstructs))))+")");
logFile.println("Max avg. group (length) of constructs (num of attr.): "+df.format(mean(maxGroupOfConstructs))+" (stdev "+ df.format(Math.sqrt(var(maxGroupOfConstructs,mean(maxGroupOfConstructs))))+")");
System.out.println("Number of logical feat: "+df.format(mean(numberOfFeatByFolds[0]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[0],mean(numberOfFeatByFolds[0]))))+")"+(numerFeat ?(" number of numerical feat: "+df.format(mean(numberOfFeatByFolds[5]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[5],mean(numberOfFeatByFolds[5]))))+")"):"")+" number of relational feat: "+df.format(mean(numberOfFeatByFolds[4]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[4],mean(numberOfFeatByFolds[4]))))+")"+" number of \"Cartesian\" feat: "+df.format(mean(numberOfFeatByFolds[3]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[3],mean(numberOfFeatByFolds[3]))))+")"+" number of FURIA feat: "+ df.format(mean(numberOfFeatByFolds[2]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[2],mean(numberOfFeatByFolds[2]))))+")"+" number of thr. feat: "+ df.format(mean(numberOfFeatByFolds[1]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[1],mean(numberOfFeatByFolds[1]))))+")");
logFile.println("Number of logical feat: "+df.format(mean(numberOfFeatByFolds[0]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[0],mean(numberOfFeatByFolds[0]))))+")"+(numerFeat ?(" number of numerical feat: "+df.format(mean(numberOfFeatByFolds[5]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[5],mean(numberOfFeatByFolds[5]))))+")"):"")+" number of relational feat: "+df.format(mean(numberOfFeatByFolds[4]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[4],mean(numberOfFeatByFolds[4]))))+")"+" number of \"Cartesian\" feat: "+df.format(mean(numberOfFeatByFolds[3]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[3],mean(numberOfFeatByFolds[3]))))+")"+" number of FURIA feat: "+ df.format(mean(numberOfFeatByFolds[2]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[2],mean(numberOfFeatByFolds[2]))))+")"+" number of thr. feat: "+ df.format(mean(numberOfFeatByFolds[1]))+" (stdev "+ df.format(Math.sqrt(var(numberOfFeatByFolds[1],mean(numberOfFeatByFolds[1]))))+")");
System.out.println("Avg. tree size (nodes+leaves): "+df.format(mean(numberOfTreeByFolds[0]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFolds[0],mean(numberOfTreeByFolds[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numberOfTreeByFolds[1]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFolds[1],mean(numberOfTreeByFolds[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numberOfTreeByFolds[2]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFolds[2],mean(numberOfTreeByFolds[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numberOfTreeByFolds[3]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFolds[3],mean(numberOfTreeByFolds[3]))))+")");
logFile.println("Avg. tree size (nodes+leaves): "+df.format(mean(numberOfTreeByFolds[0]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFolds[0],mean(numberOfTreeByFolds[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numberOfTreeByFolds[1]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFolds[1],mean(numberOfTreeByFolds[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numberOfTreeByFolds[2]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFolds[2],mean(numberOfTreeByFolds[2]))))+")"+ " avg. sum of constructs / num of nodes: "+df.format(mean(numberOfTreeByFolds[3]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFolds[3],mean(numberOfTreeByFolds[3]))))+")");
System.out.println("Avg. num of logical feat in tree: "+df.format(mean(numOfLogInTreeAll))+" (stdev "+ df.format(Math.sqrt(var(numOfLogInTreeAll,mean(numOfLogInTreeAll))))+")"+" avg. sum of (only logical) constructs: "+df.format(mean(sumOfConstrLFAll))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrLFAll,mean(sumOfConstrLFAll))))+")"+(numerFeat ? " avg. num of numerical feat in tree: "+df.format(mean(numOfNumInTreeAll))+" (stdev "+ df.format(Math.sqrt(var(numOfNumInTreeAll,mean(numOfNumInTreeAll))))+")"+" avg. sum of (only numerical) constructs: "+df.format(mean(sumOfConstrNumAll))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrNumAll,mean(sumOfConstrNumAll))))+")" :"")+" avg. num of relational feat in tree: "+df.format(mean(numOfRelInTreeAll))+" (stdev "+ df.format(Math.sqrt(var(numOfRelInTreeAll,mean(numOfRelInTreeAll))))+")"+" avg. sum of (only relational) constructs: "+df.format(mean(sumOfConstrRelAll))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrRelAll,mean(sumOfConstrRelAll))))+")"+" avg. num of \"Cartesian\" feat in tree: "+df.format(mean(numOfCartFAll))+" (stdev "+ df.format(Math.sqrt(var(numOfCartFAll,mean(numOfCartFAll))))+")"+" avg. sum of (Cartesian) constructs (in tree): "+df.format(mean(sumOfConstrCartAll))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrCartAll,mean(sumOfConstrCartAll))))+")"+" avg. num of FURIA feat. in tree: "+df.format(mean(numOfFuriaThrInTreeByFolds[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFolds[0],mean(numOfFuriaThrInTreeByFolds[0]))))+")"+" avg. sum of terms in constructs (Furia feat) in tree: "+ df.format(mean(numOfFuriaThrInTreeByFolds[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFolds[1],mean(numOfFuriaThrInTreeByFolds[1]))))+")"+" avg. num of THR feat. in tree: "+ df.format(mean(numOfFuriaThrInTreeByFolds[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFolds[2],mean(numOfFuriaThrInTreeByFolds[2]))))+")"+" avg. sum of terms in constructs (THR feat) in tree: "+df.format(mean(numOfFuriaThrInTreeByFolds[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFolds[3],mean(numOfFuriaThrInTreeByFolds[3]))))+")");
logFile.println("Avg. num of logical feat in tree: "+df.format(mean(numOfLogInTreeAll))+" (stdev "+ df.format(Math.sqrt(var(numOfLogInTreeAll,mean(numOfLogInTreeAll))))+")"+" avg. sum of (only logical) constructs: "+df.format(mean(sumOfConstrLFAll))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrLFAll,mean(sumOfConstrLFAll))))+")"+(numerFeat ? " avg. num of numerical feat in tree: "+df.format(mean(numOfNumInTreeAll))+" (stdev "+ df.format(Math.sqrt(var(numOfNumInTreeAll,mean(numOfNumInTreeAll))))+")"+" avg. sum of (only numerical) constructs: "+df.format(mean(sumOfConstrNumAll))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrNumAll,mean(sumOfConstrNumAll))))+")" :"")+" avg. num of relational feat in tree: "+df.format(mean(numOfRelInTreeAll))+" (stdev "+ df.format(Math.sqrt(var(numOfRelInTreeAll,mean(numOfRelInTreeAll))))+")"+" avg. sum of (only relational) constructs: "+df.format(mean(sumOfConstrRelAll))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrRelAll,mean(sumOfConstrRelAll))))+")"+" avg. num of \"Cartesian\" feat in tree: "+df.format(mean(numOfCartFAll))+" (stdev "+ df.format(Math.sqrt(var(numOfCartFAll,mean(numOfCartFAll))))+")"+" avg. sum of (Cartesian) constructs (in tree): "+df.format(mean(sumOfConstrCartAll))+" (stdev "+ df.format(Math.sqrt(var(sumOfConstrCartAll,mean(sumOfConstrCartAll))))+")"+" avg. num of FURIA feat. in tree: "+df.format(mean(numOfFuriaThrInTreeByFolds[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFolds[0],mean(numOfFuriaThrInTreeByFolds[0]))))+")"+" avg. sum of terms in constructs (Furia feat) in tree: "+ df.format(mean(numOfFuriaThrInTreeByFolds[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFolds[1],mean(numOfFuriaThrInTreeByFolds[1]))))+")"+" avg. num of THR feat. in tree: "+ df.format(mean(numOfFuriaThrInTreeByFolds[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFolds[2],mean(numOfFuriaThrInTreeByFolds[2]))))+")"+" avg. sum of terms in constructs (THR feat) in tree: "+df.format(mean(numOfFuriaThrInTreeByFolds[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFolds[3],mean(numOfFuriaThrInTreeByFolds[3]))))+")");
System.out.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFolds))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFolds,mean(numOfRulesByFolds))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsF))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsF,mean(numOfTermsByFoldsF))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsF)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsF,mean(numOfRatioByFoldsF))))+")");
logFile.println("Avg. ruleset size: "+df.format(mean(numOfRulesByFolds))+" (stdev "+ df.format(Math.sqrt(var(numOfRulesByFolds,mean(numOfRulesByFolds))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(numOfTermsByFoldsF))+" (stdev "+ df.format(Math.sqrt(var(numOfTermsByFoldsF,mean(numOfTermsByFoldsF))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(numOfRatioByFoldsF)) +" (stdev "+df.format(Math.sqrt(var(numOfRatioByFoldsF,mean(numOfRatioByFoldsF))))+")");
System.out.println("Mean unimp. OR feat.: "+df.format(mean(numberOfUnImpFeatByFolds[0]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[0],mean(numberOfUnImpFeatByFolds[0]))))+")"+" mean unimp. EQU feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[1]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[1],mean(numberOfUnImpFeatByFolds[1]))))+")"+" mean unimp. XOR feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[2]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[2],mean(numberOfUnImpFeatByFolds[2]))))+")"+" mean unimp. IMPL feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[3]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[3],mean(numberOfUnImpFeatByFolds[3]))))+")"+" mean unimp. AND feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[4]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[4],mean(numberOfUnImpFeatByFolds[4]))))+")"+" mean unimp. LESSTHAN feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[5]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[5],mean(numberOfUnImpFeatByFolds[5]))))+")"+" mean unimp. DIFF feat.: "+df.format(mean(numberOfUnImpFeatByFolds[6]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[6],mean(numberOfUnImpFeatByFolds[6]))))+")"+" mean unimp. \"Cartesian\" feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[7]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[7],mean(numberOfUnImpFeatByFolds[7]))))+")");
logFile.println("Mean unimp. OR feat.: "+df.format(mean(numberOfUnImpFeatByFolds[0]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[0],mean(numberOfUnImpFeatByFolds[0]))))+")"+" mean unimp. EQU feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[1]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[1],mean(numberOfUnImpFeatByFolds[1]))))+")"+" mean unimp. XOR feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[2]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[2],mean(numberOfUnImpFeatByFolds[2]))))+")"+" mean unimp. IMPL feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[3]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[3],mean(numberOfUnImpFeatByFolds[3]))))+")"+" mean unimp. AND feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[4]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[4],mean(numberOfUnImpFeatByFolds[4]))))+")"+" mean unimp. LESSTHAN feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[5]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[5],mean(numberOfUnImpFeatByFolds[5]))))+")"+" mean unimp. DIFF feat.: "+df.format(mean(numberOfUnImpFeatByFolds[6]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[6],mean(numberOfUnImpFeatByFolds[6]))))+")"+" mean unimp. \"Cartesian\" feat.: "+ df.format(mean(numberOfUnImpFeatByFolds[7]))+" (stdev "+ df.format(Math.sqrt(var(numberOfUnImpFeatByFolds[7],mean(numberOfUnImpFeatByFolds[7]))))+")");
System.out.println("-----ACC-----");
logFile.println("-----ACC-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accuracyByFolds[c]))+" (stdev "+ df.format(Math.sqrt(var(accuracyByFolds[c],mean(accuracyByFolds[c]))))+")");
logFile.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accuracyByFolds[c]))+" (stdev "+ df.format(Math.sqrt(var(accuracyByFolds[c],mean(accuracyByFolds[c]))))+")");
}
System.out.println("-----Learning and testing time-----");
logFile.println("-----Learning and testing time-----");
for (int i=0;i<clsTab.length;i++){
System.out.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTime[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTime[i],mean(learnAllFCTime[i]))))+")");
logFile.println("Avg. learning time from FC (all feat), for "+(excludeUppers(clsTab[i].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[i].getClass().getSimpleName()))+" \t"+df.format(mean(learnAllFCTime[i]))+" [ms] (stdev "+ df.format(Math.sqrt(var(learnAllFCTime[i],mean(learnAllFCTime[i]))))+")");
}
System.out.println("-----Feature construction time-----");
logFile.println("-----Feature construction time-----");
System.out.println("Avg. FC time (all feat): "+df.format(mean(allFCTime))+" [ms] (stdev "+ df.format(Math.sqrt(var(allFCTime,mean(allFCTime))))+")");
logFile.println("Avg. FC time (all feat): "+df.format(mean(allFCTime))+" [ms] (stdev "+ df.format(Math.sqrt(var(allFCTime,mean(allFCTime))))+")");
}
if(!jakulin){
if(!exhaustive){
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
System.out.println("FS on validation dataset - results");
logFile.println("FS on validation dataset - results");
System.out.println("*********************************************************************************");
logFile.println("*********************************************************************************");
for(int i=0;i<clsTab.length;i++){
System.out.println("When using "+(excludeUppers(clsTab[i].getClass().getSimpleName()))+", number of logical feat: "+df.format(mean(threeDtoTwoD(featByFoldsPS,i)[0]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[0],mean(threeDtoTwoD(featByFoldsPS,i)[0]))))+")"+(numerFeat ? (" number of numerical feat: "+df.format(mean(threeDtoTwoD(featByFoldsPS,i)[5]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[5],mean(threeDtoTwoD(featByFoldsPS,i)[5]))))+")"): "")+" number of relational feat: "+ df.format(mean(threeDtoTwoD(featByFoldsPS,i)[4]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[4],mean(threeDtoTwoD(featByFoldsPS,i)[4]))))+")"+" number of \"Cartesian\" feat: "+ df.format(mean(threeDtoTwoD(featByFoldsPS,i)[3]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[3],mean(threeDtoTwoD(featByFoldsPS,i)[3]))))+")"+" number of FURIA feat: "+ df.format(mean(threeDtoTwoD(featByFoldsPS,i)[2]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[2],mean(threeDtoTwoD(featByFoldsPS,i)[2]))))+")"+" number of thr. feat: "+ df.format(mean(threeDtoTwoD(featByFoldsPS,i)[1]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[1],mean(threeDtoTwoD(featByFoldsPS,i)[1]))))+")");
logFile.println("When using "+(excludeUppers(clsTab[i].getClass().getSimpleName()))+", number of logical feat: "+df.format(mean(threeDtoTwoD(featByFoldsPS,i)[0]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[0],mean(threeDtoTwoD(featByFoldsPS,i)[0]))))+")"+(numerFeat ? (" number of numerical feat: "+df.format(mean(threeDtoTwoD(featByFoldsPS,i)[5]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[5],mean(threeDtoTwoD(featByFoldsPS,i)[5]))))+")"): "")+" number of relational feat: "+ df.format(mean(threeDtoTwoD(featByFoldsPS,i)[4]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[4],mean(threeDtoTwoD(featByFoldsPS,i)[4]))))+")"+" number of \"Cartesian\" feat: "+ df.format(mean(threeDtoTwoD(featByFoldsPS,i)[3]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[3],mean(threeDtoTwoD(featByFoldsPS,i)[3]))))+")"+" number of FURIA feat: "+ df.format(mean(threeDtoTwoD(featByFoldsPS,i)[2]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[2],mean(threeDtoTwoD(featByFoldsPS,i)[2]))))+")"+" number of thr. feat: "+ df.format(mean(threeDtoTwoD(featByFoldsPS,i)[1]))+" (stdev "+ df.format(Math.sqrt(var(threeDtoTwoD(featByFoldsPS,i)[1],mean(threeDtoTwoD(featByFoldsPS,i)[1]))))+")");
}
System.out.println("Avg. tree size (nodes+leaves): "+df.format(mean(numberOfTreeByFoldsPS[0]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFoldsPS[0],mean(numberOfTreeByFoldsPS[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numberOfTreeByFoldsPS[1]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFoldsPS[1],mean(numberOfTreeByFoldsPS[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numberOfTreeByFoldsPS[2]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFoldsPS[2],mean(numberOfTreeByFoldsPS[2]))))+") avg. sum of constructs / num of nodes: "+df.format(mean(numberOfTreeByFoldsPS[3]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFoldsPS[3],mean(numberOfTreeByFoldsPS[3]))))+")");
logFile.println("Avg. tree size (nodes+leaves): "+df.format(mean(numberOfTreeByFoldsPS[0]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFoldsPS[0],mean(numberOfTreeByFoldsPS[0]))))+")"+" avg. number of leaves: "+ df.format(mean(numberOfTreeByFoldsPS[1]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFoldsPS[1],mean(numberOfTreeByFoldsPS[1]))))+")"+" avg. sum of constructs: "+ df.format(mean(numberOfTreeByFoldsPS[2]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFoldsPS[2],mean(numberOfTreeByFoldsPS[2]))))+") avg. sum of constructs / num of nodes: "+df.format(mean(numberOfTreeByFoldsPS[3]))+" (stdev "+ df.format(Math.sqrt(var(numberOfTreeByFoldsPS[3],mean(numberOfTreeByFoldsPS[3]))))+")");
System.out.println("Avg. num of logical feat. in tree: "+df.format(mean(numLogFeatInTreeFS[0]))+" (stdev "+ df.format(Math.sqrt(var(numLogFeatInTreeFS[0],mean(numLogFeatInTreeFS[0]))))+")"+" avg. sum of (only logical) constructs in tree: "+df.format(mean(numLogFeatInTreeFS[1]))+" (stdev "+ df.format(Math.sqrt(var(numLogFeatInTreeFS[1],mean(numLogFeatInTreeFS[1]))))+")"+(numerFeat ? (" avg. num of numerical feat. in tree: "+df.format(mean(numNumFeatInTreeFS[0]))+" (stdev "+ df.format(Math.sqrt(var(numNumFeatInTreeFS[0],mean(numNumFeatInTreeFS[0]))))+")"+" avg. sum of (only numerical) constructs in tree: "+df.format(mean(numNumFeatInTreeFS[1]))+" (stdev "+ df.format(Math.sqrt(var(numNumFeatInTreeFS[1],mean(numNumFeatInTreeFS[1]))))+")") :"")+" avg. num of relational feat. in tree: "+df.format(mean(numRelFeatInTreeFS[0]))+" (stdev "+ df.format(Math.sqrt(var(numRelFeatInTreeFS[0],mean(numRelFeatInTreeFS[0]))))+")"+" avg. sum of (only relational) constructs in tree: "+df.format(mean(numRelFeatInTreeFS[1]))+" (stdev "+ df.format(Math.sqrt(var(numRelFeatInTreeFS[1],mean(numRelFeatInTreeFS[1]))))+")"+" avg. num of Cartesian feat. in tree: "+df.format(mean(numCartFeatInTreeFS[0]))+" (stdev "+ df.format(Math.sqrt(var(numCartFeatInTreeFS[0],mean(numCartFeatInTreeFS[0]))))+")"+" avg. sum of constructs (of Cartesian feat) in tree: "+df.format(mean(numCartFeatInTreeFS[1]))+" (stdev "+ df.format(Math.sqrt(var(numCartFeatInTreeFS[1],mean(numCartFeatInTreeFS[1]))))+")"+" avg. num of FURIA feat. in tree: "+df.format(mean(numOfFuriaThrInTreeByFoldsP[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsP[0],mean(numOfFuriaThrInTreeByFoldsP[0]))))+")"+" avg. sum of terms in constructs (Furia feat) in tree: "+ df.format(mean(numOfFuriaThrInTreeByFoldsP[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsP[1],mean(numOfFuriaThrInTreeByFoldsP[1]))))+")"+" avg. num of THR feat. in tree: "+ df.format(mean(numOfFuriaThrInTreeByFoldsP[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsP[2],mean(numOfFuriaThrInTreeByFoldsP[2]))))+")"+" avg. sum of terms in constructs (THR feat) in tree: "+df.format(mean(numOfFuriaThrInTreeByFoldsP[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsP[3],mean(numOfFuriaThrInTreeByFoldsP[3]))))+")");
logFile.println("Avg. num of logical feat. in tree: "+df.format(mean(numLogFeatInTreeFS[0]))+" (stdev "+ df.format(Math.sqrt(var(numLogFeatInTreeFS[0],mean(numLogFeatInTreeFS[0]))))+")"+" avg. sum of (only logical) constructs in tree: "+df.format(mean(numLogFeatInTreeFS[1]))+" (stdev "+ df.format(Math.sqrt(var(numLogFeatInTreeFS[1],mean(numLogFeatInTreeFS[1]))))+")"+(numerFeat ? (" avg. num of numerical feat. in tree: "+df.format(mean(numNumFeatInTreeFS[0]))+" (stdev "+ df.format(Math.sqrt(var(numNumFeatInTreeFS[0],mean(numNumFeatInTreeFS[0]))))+")"+" avg. sum of (only numerical) constructs in tree: "+df.format(mean(numNumFeatInTreeFS[1]))+" (stdev "+ df.format(Math.sqrt(var(numNumFeatInTreeFS[1],mean(numNumFeatInTreeFS[1]))))+")") :"")+" avg. num of relational feat. in tree: "+df.format(mean(numRelFeatInTreeFS[0]))+" (stdev "+ df.format(Math.sqrt(var(numRelFeatInTreeFS[0],mean(numRelFeatInTreeFS[0]))))+")"+" avg. sum of (only relational) constructs in tree: "+df.format(mean(numRelFeatInTreeFS[1]))+" (stdev "+ df.format(Math.sqrt(var(numRelFeatInTreeFS[1],mean(numRelFeatInTreeFS[1]))))+")"+" avg. num of Cartesian feat. in tree: "+df.format(mean(numCartFeatInTreeFS[0]))+" (stdev "+ df.format(Math.sqrt(var(numCartFeatInTreeFS[0],mean(numCartFeatInTreeFS[0]))))+")"+" avg. sum of constructs (of Cartesian feat) in tree: "+df.format(mean(numCartFeatInTreeFS[1]))+" (stdev "+ df.format(Math.sqrt(var(numCartFeatInTreeFS[1],mean(numCartFeatInTreeFS[1]))))+")"+" avg. num of FURIA feat. in tree: "+df.format(mean(numOfFuriaThrInTreeByFoldsP[0]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsP[0],mean(numOfFuriaThrInTreeByFoldsP[0]))))+")"+" avg. sum of terms in constructs (Furia feat) in tree: "+ df.format(mean(numOfFuriaThrInTreeByFoldsP[1]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsP[1],mean(numOfFuriaThrInTreeByFoldsP[1]))))+")"+" avg. num of THR feat. in tree: "+ df.format(mean(numOfFuriaThrInTreeByFoldsP[2]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsP[2],mean(numOfFuriaThrInTreeByFoldsP[2]))))+")"+" avg. sum of terms in constructs (THR feat) in tree: "+df.format(mean(numOfFuriaThrInTreeByFoldsP[3]))+" (stdev "+ df.format(Math.sqrt(var(numOfFuriaThrInTreeByFoldsP[3],mean(numOfFuriaThrInTreeByFoldsP[3]))))+")");
System.out.println("Avg. ruleset size: "+df.format(mean(complexityOfFuriaPS[0]))+" (stdev "+ df.format(Math.sqrt(var(complexityOfFuriaPS[0],mean(complexityOfFuriaPS[0]))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(complexityOfFuriaPS[1]))+" (stdev "+ df.format(Math.sqrt(var(complexityOfFuriaPS[1],mean(complexityOfFuriaPS[1]))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(complexityOfFuriaPS[2])) +" (stdev "+df.format(Math.sqrt(var(complexityOfFuriaPS[2],mean(complexityOfFuriaPS[2]))))+")");
logFile.println("Avg. ruleset size: "+df.format(mean(complexityOfFuriaPS[0]))+" (stdev "+ df.format(Math.sqrt(var(complexityOfFuriaPS[0],mean(complexityOfFuriaPS[0]))))+")"+" avg. number of terms of construct in Furia feat.: "+ df.format(mean(complexityOfFuriaPS[1]))+" (stdev "+ df.format(Math.sqrt(var(complexityOfFuriaPS[1],mean(complexityOfFuriaPS[1]))))+") avg. number of terms in constructs per ruleset: "+df.format(mean(complexityOfFuriaPS[2])) +" (stdev "+df.format(Math.sqrt(var(complexityOfFuriaPS[2],mean(complexityOfFuriaPS[2]))))+")");
System.out.println("-----ACC-----");
logFile.println("-----ACC-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accuracyByFoldsPS[c]))+" (stdev "+ df.format(Math.sqrt(var(accuracyByFoldsPS[c],mean(accuracyByFoldsPS[c]))))+")");
logFile.println("Avg. class. ACC "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(accuracyByFoldsPS[c]))+" (stdev "+ df.format(Math.sqrt(var(accuracyByFoldsPS[c],mean(accuracyByFoldsPS[c]))))+")");
}
System.out.println("-----Search time-----");
logFile.println("-----Search time-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. search time "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(paramSearchTime[c]))+" (stdev "+ df.format(Math.sqrt(var(paramSearchTime[c],mean(paramSearchTime[c]))))+")");
logFile.println("Avg. search time "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(paramSearchTime[c]))+" (stdev "+ df.format(Math.sqrt(var(paramSearchTime[c],mean(paramSearchTime[c]))))+")");
}
System.out.println("-----Learning and testing time-----");
logFile.println("-----Learning and testing time-----");
for(int c=0;c<clsTab.length;c++){
System.out.println("Avg. learn time "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(paramSLearnT[c]))+" (stdev "+ df.format(Math.sqrt(var(paramSLearnT[c],mean(paramSLearnT[c]))))+")");
logFile.println("Avg. learn time "+(excludeUppers(clsTab[c].getClass().getSimpleName()).equals("FURIA")?"FU":excludeUppers(clsTab[c].getClass().getSimpleName()))+" \t"+df.format(mean(paramSLearnT[c]))+" (stdev "+ df.format(Math.sqrt(var(paramSLearnT[c],mean(paramSLearnT[c]))))+")");
}
}
}
tTotal.stop();
if(!justExplain){
System.out.println("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
logFile.println("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
System.out.println("Total processing time ("+fileName+"): "+tTotal.diff());
logFile.println("Total processing time ("+fileName+"): "+tTotal.diff());
System.out.println("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
logFile.println("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
}
}
}
if(justExplain || (justExplain && visualisation)){
impGroupsKD.close();
attrImpListMDL_KD.close();
attrImpListReliefF_KD.close();
discIntervalsKD.close();
}
else if (!visualisation || (!justExplain && !visualisation)){
logFile.close();
if(!visualisation && !exhaustive && !jakulin)
bestParamPerFold.close();
impGroups.close();
if(!jakulin){
attrImpListMDL.close();
attrImpListReliefF.close();
}
discIntervals.close();
}
if(groupsByThrStat && !visualisation && !justExplain &&!exhaustive && !jakulin)
groupsStat.close();
rCaller.stopRCallerOnline();
}
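/* The paired System.out.println/logFile.println calls and the repeated
   Math.sqrt(var(x, mean(x))) idiom above could be factored out. A minimal sketch,
   assuming logFile is the static PrintWriter used throughout and that var/mean
   have the signatures implied by their call sites; these helpers are illustrative
   and not part of the original code: */
private static void printBoth(String msg){ //echo one message to both the console and the log file
	System.out.println(msg);
	logFile.println(msg);
}
private static double stdev(double[] x){ //standard deviation via the existing var/mean helpers
	return Math.sqrt(var(x, mean(x)));
}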
//for constructing the TRAIN dataset enriched with logical features of depth N
public static Instances addLogFeatDepth(Instances data, List newTmpComb,OperationLog ol, boolean kononenko, int folds, int N) throws Exception{ //Discretization is by Fayyad & Irani's MDL method (the default).
String attName="";
Remove remove;
Add filter;
String attr1Val="";
Enumeration<Object> atrValues=null;
Instances newData=new Instances(data);
Instances newBinAttr=null,allDiscrete=null;
int tmp=0;
int countUnInf=0;
Set<String> setB = new HashSet<>(); //for controlling the names of generated combinations
String tmpArr[], newTmpDisc[]; //indexes for combinations
for(int i=0;i<newTmpComb.size();i++){
tmpArr=newTmpComb.get(i).toString().replace("[","").replace("]", "").trim().split(",");
if(tmpArr.length < N) //this shouldn't never happen if number of attributes is less than the depth then there is no constructive induction
continue;
allDiscrete=null; //for each combination, e.g., [3,4,7], we reset (dereference) allDiscrete
for(int j=0;j<tmpArr.length;j++){
attr1Val="";
if(newData.attribute(Integer.parseInt(tmpArr[j].trim())).isNominal()){
atrValues= newData.attribute(Integer.parseInt(tmpArr[j].trim())).enumerateValues();
while (atrValues.hasMoreElements())
attr1Val+=(String) atrValues.nextElement();
if(!((attr1Val.equals("01") || attr1Val.equals("10")) && newData.attributeStats(newData.attribute(Integer.parseInt(tmpArr[j].trim())).index()).distinctCount<=2))
newBinAttr=discretizeFI(data, Integer.parseInt(tmpArr[j].trim()),kononenko);
else{
remove= new Remove();
remove.setAttributeIndices((newData.attribute(Integer.parseInt(tmpArr[j].trim())).index()+1)+"");//rangeList - a string representing the list of attributes. Since the string will typically come from a user, attributes are indexed from 1. e.g., first-3,5,6-last
remove.setInvertSelection(true);
remove.setInputFormat(newData);
newBinAttr = Filter.useFilter(newData, remove); //just one attribute
}
}
else if(newData.attribute(Integer.parseInt(tmpArr[j].trim())).isNumeric())
newBinAttr=discretizeFI(data, Integer.parseInt(tmpArr[j].trim()),kononenko);
if(j==0)
allDiscrete=new Instances(newBinAttr); //trick to initialize allDiscrete object
else
allDiscrete=Instances.mergeInstances(allDiscrete,newBinAttr); //we have now all discretized attributes, we don't have class attr.
}
newBinAttr=null;
newTmpDisc=new String[allDiscrete.numAttributes()];
for(int j=0;j<allDiscrete.numAttributes();j++) //in allDiscrete we don't have class attribute
newTmpDisc[j]=j+"";
List newDiscComb=Arrays.asList(Generator.combination(newTmpDisc).simple(N).stream().toArray());
String newTmpDiscName;
for(int j=0;j<newDiscComb.size();j++){
setB.clear();
tmpArr=newDiscComb.get(j).toString().replace("[","").replace("]", "").trim().split(",");
for(int k=0;k<tmpArr.length;k++){
newTmpDiscName=allDiscrete.attribute(Integer.parseInt(tmpArr[k].trim())).name();
if(newTmpDiscName.contains("=="))
setB.add(newTmpDiscName.split("==")[0]); //get just original name of the attribute
else
setB.add(newTmpDiscName);
}
if(setB.size()<N)//this means that we have in combination at least two parts from the same attribute e.g. A1-B1ofB2, A1-B1ofB3
continue;
attName=allDiscrete.attribute(Integer.parseInt(tmpArr[0].trim())).name();
for(int k=1; k<N;k++)
attName+=" "+ ol.name().toLowerCase()+" "+allDiscrete.attribute(Integer.parseInt(tmpArr[k].trim())).name();
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes())); //parameter of the method must be String
filter.setAttributeName(attName); //indexes are from 0 ... n-1, attribute names are from 1 to n
filter.setNominalLabels("0,1");
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
double tmp1Attr,tmp2Attr;
for(int m = 0; m < newData.numInstances(); m++){
tmp1Attr=allDiscrete.instance(m).value(Integer.parseInt(tmpArr[0].trim()));
tmp2Attr=allDiscrete.instance(m).value(Integer.parseInt(tmpArr[1].trim()));
tmp=computeOperationTwoOperand((int)tmp1Attr,ol,(int)tmp2Attr); //we take values from the tmp datasets that are discretized and binarized
for(int l=2; l<N;l++){
tmp2Attr=allDiscrete.instance(m).value(Integer.parseInt(tmpArr[l].trim()));
tmp=computeOperationTwoOperand(tmp,ol,(int)tmp2Attr); //we take values from two tmp datasets that are discretized and binarized
}
newData.instance(m).setValue(newData.numAttributes() - 2, tmp); //enriched dataset
}
boolean remFeat=false;
remFeat=(evalFeatDuringFC && (calculateAttrImportance(newData, attName, "MDL")) < featThr);
if(newData.attributeStats(newData.attribute(attName).index()).distinctCount==1 || remFeat){
unInfFeatures.add(attName);
countUnInf++;
remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+"");//rangeList - a string representing the list of attributes. Since the string will typically come from a user, attributes are indexed from 1. e.g., first-3,5,6-last
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
}
}
}
newBinAttr=null;
allDiscrete=null;
newTmpDisc=null;
tmpArr=null;
setB.clear();
System.gc();
return newData;
}
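/* Usage sketch (hypothetical variable names): enrich a training fold with depth-3 logical
   features, where "combs" holds index combinations in the "[i, j, k]" string form parsed
   above, and OperationLog.AND is assumed to be one of the operator enum's constants:
     Instances enrichedTrain = addLogFeatDepth(trainFold, combs, OperationLog.AND, true, foldIdx, 3);
   The overload below builds the matching TEST features, discretizing test attributes with
   cut points learned on the train fold and skipping features recorded in unInfFeatures. */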
//for constructing the TEST dataset enriched with logical features of depth N
public static Instances addLogFeatDepth(Instances train, Instances test, List newTmpComb,OperationLog ol, boolean kononenko, int folds, int N) throws Exception{ //Discretization is by Fayyad & Irani's MDL method (the default).
String attName="";
Remove remove;
Add filter;
String attr1Val="";
Enumeration<Object> atrValues=null;
Instances newData=new Instances(test);
Instances newBinAttr=null,allDiscrete=null;
int tmp=0;
Set<String> setB = new HashSet<>(); //for controlling the names of generated combinations
String tmpArr[], newTmpDisc[]; //indexes for combinations
for(int i=0;i<newTmpComb.size();i++){
tmpArr=newTmpComb.get(i).toString().replace("[","").replace("]", "").trim().split(",");
if(tmpArr.length < N) //this shouldn't never happen if number of attributes is less than the depth then there is no constructive induction
continue;
allDiscrete=null; //for each combination, e.g., [3,4,7], we have to reset (dereference) allDiscrete
for(int j=0;j<tmpArr.length;j++){
attr1Val="";
if(newData.attribute(Integer.parseInt(tmpArr[j].trim())).isNominal()){
atrValues= newData.attribute(Integer.parseInt(tmpArr[j].trim())).enumerateValues();
while (atrValues.hasMoreElements())
attr1Val+=(String) atrValues.nextElement();
if(!((attr1Val.equals("01") || attr1Val.equals("10")) && newData.attributeStats(newData.attribute(Integer.parseInt(tmpArr[j].trim())).index()).distinctCount<=2))
newBinAttr=discretizeFITestBasedOnTrain(train, test,Integer.parseInt(tmpArr[j].trim()),kononenko);
else{
remove= new Remove();
remove.setAttributeIndices((newData.attribute(Integer.parseInt(tmpArr[j].trim())).index()+1)+"");//rangeList - a string representing the list of attributes. Since the string will typically come from a user, attributes are indexed from 1. e.g., first-3,5,6-last
remove.setInvertSelection(true);
remove.setInputFormat(newData);
newBinAttr = Filter.useFilter(newData, remove); //just one attribute
}
}
else if(newData.attribute(Integer.parseInt(tmpArr[j].trim())).isNumeric())
newBinAttr=discretizeFITestBasedOnTrain(train, test,Integer.parseInt(tmpArr[j].trim()),kononenko);
if(j==0)
allDiscrete=new Instances(newBinAttr); //trick to initialize allDiscrete object
else
allDiscrete=Instances.mergeInstances(allDiscrete,newBinAttr); //we have now all discretized attributes, we don't have class attr.
}
newTmpDisc=new String[allDiscrete.numAttributes()];
for(int j=0;j<allDiscrete.numAttributes();j++) //in allDiscrete we don't have class attribute
newTmpDisc[j]=j+"";
List newDiscComb=Arrays.asList(Generator.combination(newTmpDisc).simple(N).stream().toArray());
String newTmpDiscName;
for(int j=0;j<newDiscComb.size();j++){
setB.clear();
tmpArr=newDiscComb.get(j).toString().replace("[","").replace("]", "").trim().split(",");
for(int k=0;k<tmpArr.length;k++){
newTmpDiscName=allDiscrete.attribute(Integer.parseInt(tmpArr[k].trim())).name();
if(newTmpDiscName.contains("=="))
setB.add(newTmpDiscName.split("==")[0]); //get just original name of the attribute
else
setB.add(newTmpDiscName);
}
if(setB.size()<N)//this means that we have in combination at least two parts from the same attribute e.g. A1-B1ofB2, A1-B1ofB3
continue;
attName=allDiscrete.attribute(Integer.parseInt(tmpArr[0].trim())).name();
for(int k=1; k<N;k++)
attName+=" "+ ol.name().toLowerCase()+" "+allDiscrete.attribute(Integer.parseInt(tmpArr[k].trim())).name();
if(unInfFeatures.contains(attName)) //skip features flagged uninformative on train; they do not exist in the train dataset
continue;
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes())); //parameter of the method must be String
filter.setAttributeName(attName); //indexes are from 0 ... n-1, attribute names are from 1 to n
filter.setNominalLabels("0,1");
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
double tmp1Attr,tmp2Attr;
for(int m = 0; m < newData.numInstances(); m++){
tmp1Attr=allDiscrete.instance(m).value(Integer.parseInt(tmpArr[0].trim()));
tmp2Attr=allDiscrete.instance(m).value(Integer.parseInt(tmpArr[1].trim()));
tmp=computeOperationTwoOperand((int)tmp1Attr,ol,(int)tmp2Attr); //we take values from the tmp datasets that are discretized and binarized
for(int l=2; l<N;l++){
tmp2Attr=allDiscrete.instance(m).value(Integer.parseInt(tmpArr[l].trim()));
tmp=computeOperationTwoOperand(tmp,ol,(int)tmp2Attr); //we take values from two tmp datasets that are discretized and binarized
}
newData.instance(m).setValue(newData.numAttributes() - 2, tmp); //enriched dataset
}
}
}
return newData;
}
//add relational feature
public static Instances addRelFeat(Instances data, List newTmpComb, OperationRel op, boolean train, int folds) throws Exception{ //Discretization is by Fayyad & Irani's MDL method (the default).
// we need folds for saving info of unimportant features
String attName="";
Remove remove;
Add filter;
int countUnInf=0;
String combName;
Instances newData=new Instances(data);
int idxAttr1,idxAttr2;
int tmp;
Set<String> setA = new HashSet<>(); //for controlling attribute names
Set<String> setB = new HashSet<>(); //for controlling the names of generated combinations
for (int i=0;i<data.numAttributes()-1;i++)
setA.add(data.attribute(i).name());
for(int j=0;j<newTmpComb.size();j++){ //we get combinations in style [1,2]
idxAttr1=Integer.parseInt(newTmpComb.get(j).toString().replace("[","").replace("]", "").trim().split(",")[0].trim());
idxAttr2=Integer.parseInt(newTmpComb.get(j).toString().replace("[","").replace("]", "").trim().split(",")[1].trim());
attName="";
if(newData.attribute(idxAttr1).isNumeric() && newData.attribute(idxAttr2).isNumeric()){
combName=newData.attribute(idxAttr1).name()+newData.attribute(idxAttr2).name();
if(setB.contains(combName)) //if combination exists, we don't generate feature
continue;
else
setB.add(combName);
attName=newData.attribute(idxAttr1).name()+" "+op.name()+" "+newData.attribute(idxAttr2).name();
if(setA.contains(attName) || (!train && unInfFeatures.contains(attName))) //if attribute exists or it is uninformative, we don't add it
continue;
else
setA.add(attName);
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes()));
filter.setAttributeName(attName);
filter.setNominalLabels("0,1");
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
for(int m = 0; m < newData.numInstances(); m++){
tmp=computeRelOpTwoOperand(newData.instance(m).value(idxAttr1), op, newData.instance(m).value(idxAttr2));
newData.instance(m).setValue(newData.numAttributes() - 2, tmp); //enriched dataset
}
}
if(train && !attName.equals("")){
boolean remFeat=false;
remFeat=(evalFeatDuringFC && (calculateAttrImportance(newData, attName, "MDL") < featThr));
if(!attName.trim().equals(""))
if(newData.attributeStats(newData.attribute(attName).index()).distinctCount==1 || remFeat){
unInfFeatures.add(attName);
countUnInf++;
remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+"");
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
}
}
}
if(train){
if(op.name().equalsIgnoreCase("LESSTHAN"))
numberOfUnImpFeatByFolds[5][folds]=countUnInf;
if(op.name().equalsIgnoreCase("DIFF"))
numberOfUnImpFeatByFolds[6][folds]=countUnInf;
}
return newData;
}
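/* Semantics sketch for the relational features added above (assumed from the helper's name
   and the 0/1 nominal labels): for numeric attributes A and B, "A LESSTHAN B" is the
   per-instance indicator (A < B ? 1 : 0) and "A DIFF B" the indicator (A != B ? 1 : 0),
   as computed by computeRelOpTwoOperand. */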
//generate Cartesian product
public static Instances addCartFeat(Instances data, List newTmpComb,boolean kononenko, int folds, int N, boolean train) throws Exception{
String attName="";
Remove remove;
Add filter;
Instances newData=new Instances(data);
String tmp;
int countUnInf=0;
String tmpArr[]; //indexes for combinations
for(int i=0;i<newTmpComb.size();i++){
tmpArr=newTmpComb.get(i).toString().replace("[","").replace("]", "").trim().split(",");
if(tmpArr.length < N) //this shouldn't never happen if number of attributes is less than the depth then there is no constructive induction
continue;
attName= newData.attribute(Integer.parseInt(tmpArr[0].trim())).name()+"_x_"+newData.attribute(Integer.parseInt(tmpArr[1].trim())).name();
if(!train){
if(unInfFeatures.contains(attName)) //skip features flagged uninformative on train; they do not exist in the train dataset
continue;
}
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes())); //parameter of the method must be String
filter.setAttributeName(attName); //indexes are from 0 ... n-1, attribute names are from 1 to n
String allDiscValues=genDiscValues(data,Integer.parseInt(tmpArr[0].trim()),Integer.parseInt(tmpArr[1].trim()));
filter.setNominalLabels(allDiscValues);
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
for(int m = 0; m < newData.numInstances(); m++){
tmp=mergeValues(newData.instance(m).stringValue(Integer.parseInt(tmpArr[0].trim())),newData.instance(m).stringValue(Integer.parseInt(tmpArr[1].trim())));
newData.instance(m).setValue(newData.numAttributes() - 2, tmp); //enriched dataset
}
if(train){ //we don't remove uninformative features on the test fold
boolean remFeat=false;
remFeat=(evalFeatDuringFC && (calculateAttrImportance(newData, attName, "MDL") < featThr));
if(newData.attributeStats(newData.attribute(attName).index()).distinctCount==1 || remFeat){
unInfFeatures.add(attName);
countUnInf++;
remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+"");
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
}
}
}
if(train)
numberOfUnImpFeatByFolds[7][folds]=countUnInf;
return newData;
}
//generate Cartesian product (overload: discrete values are read from a separately discretized copy of the data)
public static Instances addCartFeat(Instances origData, Instances discData,List newTmpComb,boolean kononenko, int folds, int N, boolean train) throws Exception{
String attName="";
Remove remove;
Add filter;
Instances newData=new Instances(origData);
String tmp;
int countUnInf=0;
String tmpArr[]; //indexes for combinations
for(int i=0;i<newTmpComb.size();i++){
tmpArr=newTmpComb.get(i).toString().replace("[","").replace("]", "").trim().split(",");
if(tmpArr.length < N) //this shouldn't never happen if number of attributes is less than the depth then there is no constructive induction
continue;
attName= newData.attribute(Integer.parseInt(tmpArr[0].trim())).name()+"_x_"+newData.attribute(Integer.parseInt(tmpArr[1].trim())).name();
if(!train){
if(unInfFeatures.contains(attName)) //skip features flagged uninformative on train; they do not exist in the train dataset
continue;
}
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes())); //parameter of the method must be String
filter.setAttributeName(attName); //indexes are from 0 ... n-1, attribute names are from 1 to n
String allDiscValues=genDiscValues(discData,Integer.parseInt(tmpArr[0].trim()),Integer.parseInt(tmpArr[1].trim()));
filter.setNominalLabels(allDiscValues);
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
for(int m = 0; m < newData.numInstances(); m++){
//we take data from discretized dataset
tmp=mergeValues(discData.instance(m).stringValue(Integer.parseInt(tmpArr[0].trim())),discData.instance(m).stringValue(Integer.parseInt(tmpArr[1].trim())));
newData.instance(m).setValue(newData.numAttributes() - 2, tmp); //enriched dataset
}
if(train){
boolean remFeat=false;
remFeat=(evalFeatDuringFC && (calculateAttrImportance(newData, attName, "MDL") < featThr));
if(newData.attributeStats(newData.attribute(attName).index()).distinctCount==1 || remFeat){
unInfFeatures.add(attName);
countUnInf++;
remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+"");
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
}
}
}
if(train)
numberOfUnImpFeatByFolds[7][folds]=countUnInf;
return newData;
}
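/* Sketch of the Cartesian construct built by both overloads above (the semantics of
   mergeValues and genDiscValues are assumed from their names and use): for discretized
   values v1 of A1 and v2 of A2, the new nominal feature "A1_x_A2" takes the merged value
   of (v1, v2), one of the labels enumerated by genDiscValues for setNominalLabels. */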
//generate candidate features from FURIA rules (variant without OR constructs)
public static List<String> genFeatFromFuria(Instances data,ArrayList<String> allComb, int c, double cfF, double cfI, boolean covering, boolean featFromExplClass){ //cfF=0.85 ... Furia, cfI=0.9 number of intances covered, c ... class to explain
List<String> list=null;
ArrayList<String> oneConcept=new ArrayList<>();
ArrayList<String> oneAttrConcepts=new ArrayList<>();
HashSet<String> allFeatures = new HashSet<>();
String mergedOrRule="",attName="";
Instances newData= new Instances(data);
int tmp;
String classValue="";
String nomValue;
RemoveRange rr=new RemoveRange();
String newTmp[];
boolean ruleTrue=true;
int[] distribucija=null; //class value counts (distribution)
double pct = 0;
int count;
try{
int nc=data.attributeStats(data.classIndex()).nominalCounts[c]; //number of instances from the explained class
String className=data.attribute(data.classIndex()).name(); //we need class name for parsing
String classValueExp=data.attribute(data.classIndex()).value(c);
for(int j=0;j<allComb.size();j++){
count=0;
newTmp=allComb.get(j).split(",");
String attrList="";
for(int i=0;i<newTmp.length;i++)
attrList+=(Integer.parseInt(newTmp[i])+1)+",";
attrList+=(data.classIndex()+1);
//use only attributes from concept
Remove remove1= new Remove();
remove1.setAttributeIndices(attrList); //the listed attributes are the ones to keep (the selection is inverted below); without inverting, they would be the ones removed
remove1.setInvertSelection(true); //invert the selection so that all non-listed attributes are removed
remove1.setInputFormat(data);
Instances instNew = Filter.useFilter(data, remove1); //keep only the attributes of this combination (plus the class)
instNew.setClassIndex(instNew.numAttributes()-1); //set class attribute
String ruleName[]=null;
Classifier model;
//using Furia
FURIA fu=new FURIA();
fu.setOptimizations(5); //number of optimization steps
model=fu;
model.buildClassifier(instNew);
ArrayList<Rule> arrRule=new ArrayList<>();
arrRule= fu.getRuleset();
ArrayList<String> newRules=new ArrayList<>();
double tmpCF;
ruleName=new String[arrRule.size()];
for(int i = 0; i < ruleName.length; i++){
tmpCF=Math.round(100.0 * ((FURIA.RipperRule) arrRule.get(i)).getConfidence())/ 100.0;
ruleName[i] = ((FURIA.RipperRule)arrRule.get(i)).toString(instNew.classAttribute())+" (CF = "+ tmpCF + ")\n";
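//normalize the Unicode minus sign (U+2212) that FURIA's toString may emit to an ASCII hyphen, so the numeric parsing below succeeds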
if(ruleName[i].contains("−"))
ruleName[i]=ruleName[i].replaceAll("−", "-");
if(featFromExplClass)
if(!ruleName[i].substring(ruleName[i].indexOf(className)+className.length()+1,ruleName[i].indexOf("(CF")).trim().equals(classValueExp) || tmpCF < cfF)
continue;
if(!ruleName[i].contains(" and ")){
if(ruleName[i].contains("[-inf")){
if(ruleName[i].contains(",") && !ruleName[i].contains("inf, inf")){ //e.g., (VvLOX in [-inf, -inf, 0.182135, 0.349099])
ruleName[i]=ruleName[i].replace("in [-", "<= ");
String tabTmp[]=ruleName[i].split(",");
ruleName[i]=ruleName[i].replace("]", "").replace(")","").substring(0,ruleName[i].indexOf("<=")+3)+tabTmp[tabTmp.length-2].trim()+")";
}
if(ruleName[i].contains(",") && ruleName[i].contains("inf, inf")){ //e.g (VvAGPL in [-0.25942, -0.249411, inf, inf]) (VvGLC2 in [-0.343839, 3.317059, inf, inf])
ruleName[i]=ruleName[i].replace("in [", ">= ");
String tabTmp[]=ruleName[i].split(",");
ruleName[i]=ruleName[i].replace("]", "").replace(")","").substring(0,ruleName[i].indexOf(">=")+2)+tabTmp[0]+")";
}
}
if(ruleName[i].contains("inf, inf")){ //e.g., (Cycle time in [64.7, 113.2, inf, inf]); Cycle time is attr.
ruleName[i]=ruleName[i].replace("in [", ">= ");
if(ruleName[i].contains(",")){
String tabTmp[]=ruleName[i].split(",");
ruleName[i]=ruleName[i].replace("]", "").replace(")","").substring(0,ruleName[i].indexOf(">=")+2)+tabTmp[1]+")";
}
}
oneConcept.add(ruleName[i].trim());
}
else
newRules.add(ruleName[i]);
}
for(int i=0;i<oneConcept.size();i++){
if(oneConcept.get(i).contains(" => "))
mergedOrRule=oneConcept.get(i).split(" => ")[0].trim();
else
mergedOrRule=oneConcept.get(i);
oneAttrConcepts.add(mergedOrRule);
}
newRules.addAll(oneAttrConcepts);
oneAttrConcepts.clear();
oneConcept.clear();
String attrNameRule;
double attrValue;
for (int i = 0; i < newRules.size(); i++){
if(newRules.get(i).contains(" and ")){
attName=newRules.get(i).split(" => ")[0].trim(); //left side ... attributes
int idxStart=newRules.get(i).split(" => ")[1].trim().indexOf("=");
int idxEnd=newRules.get(i).split(" => ")[1].trim().indexOf("(");
classValue=newRules.get(i).split(" => ")[1].trim().substring(idxStart+1, idxEnd).trim(); //right side ... class value
pct=Double.parseDouble(newRules.get(i).split(" => ")[1].trim().substring(newRules.get(i).split(" => ")[1].trim().indexOf("CF")).split(" = ")[1].replace(")","").trim());
newRules.set(i,newRules.get(i).split(" => ")[0].trim()); //left side ... attributes
String allRules1[];
allRules1=newRules.get(i).split(" and ");
newRules.set(i,"");
for(int p=0;p<allRules1.length;p++){
if(allRules1[p].contains("-inf, -inf")){
allRules1[p]=allRules1[p].replace("in [-inf", "<= ");
String tabTmp[]=allRules1[p].split(",");
allRules1[p]=allRules1[p].replace("]", "").replace(")","").substring(0,allRules1[p].indexOf("<=")+3)+tabTmp[tabTmp.length-2].replace("]", "").replace(")","").trim()+")";
}
else if(allRules1[p].contains("inf, inf")){
allRules1[p]=allRules1[p].replace("in [", ">= ");
String tabTmp[]=allRules1[p].split(",");
allRules1[p]=allRules1[p].replace("]", "").replace(")","").substring(0,allRules1[p].indexOf(">=")+2)+tabTmp[1]+")";
}
if(p==allRules1.length-1)
newRules.set(i,newRules.get(i)+allRules1[p].trim());
else
newRules.set(i,newRules.get(i)+allRules1[p].trim()+" and ");
}
attName=newRules.get(i);
}
else
attName=newRules.get(i); //attribute name with just one condition
if(!attName.equals(""))
allFeatures.add(attName);
if(covering){ //control which instances are covered by the generated features
String attrTmpRule[] = null;
if(attName.contains(" and "))
attrTmpRule= attName.trim().split(" and "); //left side ... attributes e.g., (A3 = 1) and (A2 = 1) and (A1 = 0)
else{ //append a separator that cannot occur in attribute names so split() yields a single-element array
attName=attName+"@";
attrTmpRule=attName.trim().split("@");
}
attName="";
String remove="";
for(int l = 0; l < newData.numInstances(); l++){
ruleTrue=true;
tmp=0;
for(int k=0;k<attrTmpRule.length;k++){ //parsing e.g., (A3 = 1) and (A2 = 1) and (A1 = 0) we parse attribute name and value
//AND
if(attrTmpRule[k].contains(" = ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" = ")[0].trim();
if(newData.attribute(attrNameRule).isNominal()){
nomValue=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" = ")[1].trim(); //nominal value as a string
if(!newData.instance(l).stringValue(newData.attribute(attrNameRule).index()).equals(nomValue)){
ruleTrue=false;
break;
}
}
else{
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" = ")[1].trim());
if(newData.instance(l).value(newData.attribute(attrNameRule)) != attrValue){
ruleTrue=false;
break;
}
}
}
else if (attrTmpRule[k].contains(" >= ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" >= ")[0].trim();
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" >= ")[1].trim());
if(!(newData.instance(l).value(newData.attribute(attrNameRule)) >= attrValue)){
ruleTrue=false;
break;
}
}
else if (attrTmpRule[k].contains(" <= ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" <= ")[0].trim();
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" <= ")[1].trim());
if(!(newData.instance(l).value(newData.attribute(attrNameRule)) <= attrValue)){
ruleTrue=false;
break;
}
}
}
if(ruleTrue)
remove+=(l+1)+",";
}
//remove covered instances
rr.setInstancesIndices(remove);
rr.setInputFormat(newData);
newData = Filter.useFilter(newData, rr);
distribucija=newData.attributeStats(newData.classIndex()).nominalCounts;
remove="";
}
}
if(covering){
if(distribucija!=null){
if(distribucija[c]<=Math.ceil(nc-(cfI*nc))) //stop when the explained class is (almost) fully covered
break;
}
}
}
list = new ArrayList<>(allFeatures);
}
catch(Exception e){
System.out.println("Instability in FURIA algorithm");
logFile.println("Instability in FURIA algorithm");
e.printStackTrace(System.err);
}
return list;
}
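/*
 * Illustrative sketch (comment only, hypothetical rule text): genFeatFromFuria normalizes
 * FURIA's fuzzy interval notation into plain comparisons before the rules become features.
 *
 *   FURIA output: (VvLOX in [-inf, -inf, 0.182135, 0.349099]) => class=pos (CF = 0.9)
 *   normalized:   (VvLOX <= 0.182135)
 *
 *   FURIA output: (Cycle time in [64.7, 113.2, inf, inf]) => class=pos (CF = 0.85)
 *   normalized:   (Cycle time >= 113.2)
 *
 * Conjunctive antecedents keep their " and " separators, e.g., (A3 = 1) and (A2 = 1) and (A1 = 0).
 */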
public static Instances addFeatNumOfN(Instances data,ArrayList<String> allFeat, boolean train) throws Exception{
String attName, attrNameRule,nomValue;
Add filter;
Remove remove;
String splitChar;
String nominalLabels;
double attrValue;
Instances newData=new Instances(data);
int tmp;
for(int i=0;i<allFeat.size();i++){
nominalLabels="";
attName=allFeat.get(i);
if(!attName.contains(" and ")) //num-of-N features that have only one condition are not generated because they are the same as those generated by FURIA -> e.g., the result of num-of-N ((A8> = 1)) and (A8> = 1) is the same
continue;
String attrTmpRule [] = attName.trim().split(" and "); //left side ... attributes e.g., (A3 = 1) and (A2 = 1) and (A1 = 0)
for(int a=0;a<=attrTmpRule.length;a++) //max is all conditions (attrTmpRule.length), min is 0 -> 0...attrTmpRule.length
if(a==attrTmpRule.length)
nominalLabels+=a;
else
nominalLabels+=a+",";
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes())); //parameter of the method must be String
attName="num-of-N("+attName+")";
if(!train && unInfFeatures.contains(attName)) //skip features that were found uninformative on the training data
continue;
filter.setAttributeName(attName); //Java indices run from 0 to n-1, while the filter's index parameters are 1-based
filter.setNominalLabels(nominalLabels); //filter.setNominalLabels("0,1,2,3,4");
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
for(int j = 0; j < newData.numInstances(); j++){
tmp=0;
String tmpOrRule []=null;
for(int k=0;k<attrTmpRule.length;k++){ //parsing e.g., (A3 = 1) and (A2 = 1) and (A1 = 0) we parse attribute name and value
//OR
if(attrTmpRule[k].contains(" or ")){
tmpOrRule=attrTmpRule[k].split(" or ");
for(int n=0;n<tmpOrRule.length;n++){
splitChar=tmpOrRule[n].contains(" = ")?" = ":tmpOrRule[n].contains(" >= ")?" >= ":" <= "; //we consider only these three operators: =, >= and <=
attrNameRule=tmpOrRule[n].trim().replace("(", "").replace(")", "").split(splitChar)[0].trim();
attrValue=Double.parseDouble(tmpOrRule[n].trim().replace("(", "").replace(")", "").split(splitChar)[1].trim());
if(newData.attribute(attrNameRule).isNominal()){
nomValue=tmpOrRule[n].trim().replace("(", "").replace(")", "").split("=")[1].trim(); //nominal value as a string
if(newData.instance(j).stringValue(newData.attribute(attrNameRule).index()).equals(nomValue)){
tmp++;
}
}
else{
if(splitChar.equals(" = "))
if(newData.instance(j).value(newData.attribute(attrNameRule)) == attrValue)
tmp++;
if(splitChar.equals(" >= "))
if(newData.instance(j).value(newData.attribute(attrNameRule)) >= attrValue)
tmp++;
if(splitChar.equals(" <= "))
if(newData.instance(j).value(newData.attribute(attrNameRule)) <= attrValue)
tmp++;
}
}
continue; //this condition contained "or" and has been handled; move on to the next condition
}
//AND
if(attrTmpRule[k].contains(" = ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split("=")[0].trim();
if(newData.attribute(attrNameRule).isNominal()){
nomValue=attrTmpRule[k].trim().replace("(", "").replace(")", "").split("=")[1].trim(); //nominal value as a string
if(newData.instance(j).stringValue(newData.attribute(attrNameRule).index()).equals(nomValue))
tmp++;
}
else{
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split("=")[1].trim());
if(newData.instance(j).value(newData.attribute(attrNameRule)) == attrValue)
tmp++;
}
}
else if (attrTmpRule[k].contains(" >= ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" >= ")[0].trim();
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split(">=")[1].trim());
if(newData.instance(j).value(newData.attribute(attrNameRule)) >= attrValue)
tmp++;
}
else if (attrTmpRule[k].contains(" <= ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split("<=")[0].trim();
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split("<=")[1].trim());
if(newData.instance(j).value(newData.attribute(attrNameRule)) <= attrValue)
tmp++;
}
}
newData.instance(j).setValue(newData.numAttributes() - 2, tmp); //the new attribute was inserted just before the class attribute
}
if(train){
boolean remFeat=false;
remFeat=(evalFeatDuringFC && (calculateAttrImportance(newData, attName, "MDL") < featThr));
if(remFeat){
unInfFeatures.add(attName);
remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+"");
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
}
}
}
return newData;
}
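/*
 * Minimal usage sketch for addFeatNumOfN (hypothetical attribute names): for the construct
 * below, a nominal attribute "num-of-N((A3 = 1) and (A2 = 1) and (A1 = 0))" with labels
 * 0,1,2,3 is appended, and each instance receives the number of satisfied conditions.
 *
 *   ArrayList<String> feats = new ArrayList<>();
 *   feats.add("(A3 = 1) and (A2 = 1) and (A1 = 0)");
 *   Instances enriched = addFeatNumOfN(train, feats, true); //true: evaluate features and drop uninformative ones
 */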
public static Instances addFeatures(Instances data,ArrayList<String> allFeat, boolean train) throws Exception{ //add features from FURIA to dataset
String attName, attrNameRule, nomValue;
Remove remove;
Add filter;
double attrValue;
Instances newData=new Instances(data);
int tmp=0;
for(int i=0;i<allFeat.size();i++){
attName=allFeat.get(i);
String attrTmpRule [] = attName.trim().split(" and "); //left side ... attributes e.g., (A3 = 1) and (A2 = 1) and (A1 = 0)
if(!train && unInfFeatures.contains(attName)) //if feature is not informative and doesn't exist in train dataset
continue;
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes())); //parameter of the method must be String
filter.setAttributeName(attName); //Java indices run from 0 to n-1, while the filter's index parameters are 1-based
filter.setNominalLabels("0,1");
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
for(int j = 0; j < newData.numInstances(); j++){
tmp=1;
for(int k=0;k<attrTmpRule.length;k++){ //parsing e.g., (A3 = 1) and (A2 = 1) and (A1 = 0) we parse attribute name and value
if(tmp==0) //a previous condition already failed, so the conjunction is false
break;
//AND
if(attrTmpRule[k].contains(" = ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" = ")[0].trim();
if(newData.attribute(attrNameRule).isNominal()){
nomValue=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" = ")[1].trim(); //nominal value as a string
if(!newData.instance(j).stringValue(newData.attribute(attrNameRule).index()).equals(nomValue)){
tmp=0;
break;
}
}
else{
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split("=")[1].trim());
if(newData.instance(j).value(newData.attribute(attrNameRule)) != attrValue){
tmp=0;
break;
}
}
}
else if (attrTmpRule[k].contains(" >= ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" >= ")[0].trim();
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" >= ")[1].trim());
if(!(newData.instance(j).value(newData.attribute(attrNameRule)) >= attrValue)){
tmp=0;
break;
}
}
else if (attrTmpRule[k].contains(" <= ")){
attrNameRule=attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" <= ")[0].trim();
attrValue=Double.parseDouble(attrTmpRule[k].trim().replace("(", "").replace(")", "").split(" <= ")[1].trim());
if(!(newData.instance(j).value(newData.attribute(attrNameRule)) <= attrValue)){
tmp=0;
break;
}
}
}
newData.instance(j).setValue(newData.numAttributes() - 2, tmp); //the new attribute was inserted just before the class attribute
}
if(train){
boolean remFeat=false;
remFeat=(evalFeatDuringFC && (calculateAttrImportance(newData, attName, "MDL") < featThr));
if(remFeat){
unInfFeatures.add(attName);
remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+"");
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
}
}
}
return newData;
}
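/*
 * Minimal usage sketch for addFeatures (hypothetical names): each construct becomes a binary
 * attribute with labels {0,1}; an instance gets 1 only if all conjuncts hold.
 *
 *   ArrayList<String> feats = new ArrayList<>();
 *   feats.add("(A1 >= 0.5) and (A2 = yes)");
 *   Instances enrichedTrain = addFeatures(train, feats, true);  //train=true: uninformative features may be pruned
 *   Instances enrichedTest  = addFeatures(test,  feats, false); //train=false: features pruned on train are skipped
 */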
public static Instances addNumFeat(Instances data, OperationNum op, List newTmpComb, boolean train, RCaller rCaller, RCode code) throws Exception{ //feature importance of new attributes is estimated with MDL; discretization uses Fayyad & Irani's method (the default)
// we need folds for saving info of unimportant features
String attName="";
Add filter;
Remove remove;
String combName;
Instances newData=new Instances(data);
int idxAttr1,idxAttr2, tmpIdx;
double tmp;
Set setA = new HashSet(); //to control attribute names
Set setB = new HashSet(); //to control the names of generated combinations
for (int i=0;i<data.numAttributes()-1;i++)
setA.add(data.attribute(i).name());
for(int j=0;j<newTmpComb.size();j++){ //we get combinations in style [1,2]
idxAttr1=Integer.parseInt(newTmpComb.get(j).toString().replace("[","").replace("]", "").trim().split(",")[0].trim());
idxAttr2=Integer.parseInt(newTmpComb.get(j).toString().replace("[","").replace("]", "").trim().split(",")[1].trim());
if(!(op==OperationNum.ADD) && !(op==OperationNum.ABSDIFF) && !(op==OperationNum.SUBTRACT)){ //for the remaining operations both operand orders are generated
for(int k=0;k<2; k++){ //we try both combinations, A1/A2 and A2/A1
if(k==1){
tmpIdx=idxAttr1;
idxAttr1=idxAttr2;
idxAttr2=tmpIdx;
}
if(newData.attribute(idxAttr1).isNumeric() && newData.attribute(idxAttr2).isNumeric()){
combName=newData.attribute(idxAttr1).name()+newData.attribute(idxAttr2).name();
if(setB.contains(combName)) //if a combination already exists, we do not generate it
continue;
else
setB.add(combName);
attName=newData.attribute(idxAttr1).name()+" "+op.name()+" "+newData.attribute(idxAttr2).name();
if(setA.contains(attName) || (!train && unInfFeatures.contains(attName))) //if the attribute already exists, do not add it
continue;
else
setA.add(attName);
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes())); //parameter of the method must be String
filter.setAttributeName(attName); //Java indices run from 0 to n-1, while the filter's index parameters are 1-based
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
for(int m = 0; m < newData.numInstances(); m++){
if(newData.instance(m).value(idxAttr2)==0.0 && op==OperationNum.DIVIDE){ //division by zero would give NaN, which Weka treats as a missing value (?), so we use a sentinel instead
if(newData.instance(m).value(idxAttr1)<0 || newData.instance(m).value(idxAttr2)<0)
newData.instance(m).setValue(newData.numAttributes() - 2, -Float.MAX_VALUE);
else
newData.instance(m).setValue(newData.numAttributes() - 2, Float.MAX_VALUE);
}
else{
tmp=computeNumOperation(newData.instance(m).value(idxAttr1), op, newData.instance(m).value(idxAttr2));
newData.instance(m).setValue(newData.numAttributes() - 2, tmp); //enriched dataset
}
}
}
if(train && !attName.equals("")){
boolean remFeat=false;
remFeat=(evalFeatDuringFC && (calcFeatImpMDL(newData, newData.attribute(attName).index(), rCaller, code) < featThr));
if(remFeat){
unInfFeatures.add(attName);
remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+"");
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
}
}
}
}
else{
if(newData.attribute(idxAttr1).isNumeric() && newData.attribute(idxAttr2).isNumeric()){
combName=newData.attribute(idxAttr1).name()+newData.attribute(idxAttr2).name();
if(setB.contains(combName)) //if combination exists, then we don't generate it
continue;
else
setB.add(combName);
attName=newData.attribute(idxAttr1).name()+" "+op.name()+" "+newData.attribute(idxAttr2).name();
if(setA.contains(attName) || (!train && unInfFeatures.contains(attName)) || attName.equals("")) //if the attribute already exists, do not add it
continue;
else
setA.add(attName);
filter= new Add();
filter.setAttributeIndex("" + (newData.numAttributes())); //the method parameter must be String
filter.setAttributeName(attName); //Java indices run from 0 to n-1, while the filter's index parameters are 1-based
filter.setInputFormat(newData);
newData = Filter.useFilter(newData, filter);
for(int m = 0; m < newData.numInstances(); m++){
if(newData.instance(m).value(idxAttr2)==0.0 && op==OperationNum.DIVIDE){ //division by zero would give NaN, which Weka treats as a missing value (?), so we use a sentinel instead
if(newData.instance(m).value(idxAttr1)<0 || newData.instance(m).value(idxAttr2)<0) //(-)Float.MAX_VALUE in the enriched dataset signals division by zero
newData.instance(m).setValue(newData.numAttributes() - 2, -Float.MAX_VALUE);
else
newData.instance(m).setValue(newData.numAttributes() - 2, Float.MAX_VALUE);
}
else{
tmp=computeNumOperation(newData.instance(m).value(idxAttr1), op, newData.instance(m).value(idxAttr2));
newData.instance(m).setValue(newData.numAttributes() - 2, tmp); //enriched dataset
}
}
}
if(train && !attName.equals("")){
boolean remFeat=false;
remFeat=(evalFeatDuringFC && (calcFeatImpMDL(newData, newData.attribute(attName).index(), rCaller, code) < featThr));
if(remFeat){
unInfFeatures.add(attName);
remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+"");
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
}
}
}
attName="";
}
return newData;
}
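/*
 * Minimal usage sketch for addNumFeat (hypothetical attribute pair): elements of newTmpComb must
 * print as "[i, j]", e.g., java.util.Arrays.asList(0, 1). For op=DIVIDE both operand orders are
 * generated ("A DIVIDE B" and "B DIVIDE A"); division by zero is encoded as +/-Float.MAX_VALUE.
 *
 *   List pairs = new ArrayList();
 *   pairs.add(java.util.Arrays.asList(0, 1)); //toString() gives "[0, 1]", which addNumFeat parses
 *   Instances enriched = addNumFeat(train, OperationNum.DIVIDE, pairs, true, rCaller, code);
 */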
public static int[] numOfFeat(Instances dataset,int numOfAttr) { //counting features
int numLogical = 0, numThr=0, numFuria=0,numCartesian=0, numRelational=0, numNumerical=0;
int features[]=new int[6];
for(int i=numOfAttr;i<dataset.numAttributes()-1;i++){
if(dataset.attribute(i).name().contains(" xor ")||(dataset.attribute(i).name().contains(" or ") && !dataset.attribute(i).name().contains("(") )||dataset.attribute(i).name().contains(" equ ")||dataset.attribute(i).name().contains(" impl ") || (dataset.attribute(i).name().contains(" and ") && !dataset.attribute(i).name().contains("(") ))
numLogical++;
if(dataset.attribute(i).name().contains("num-of-N(("))
numThr++;
if(dataset.attribute(i).name().contains("(") && !dataset.attribute(i).name().contains("num-of-N(("))
numFuria++;
if(dataset.attribute(i).name().contains("_x_"))
numCartesian++;
if(dataset.attribute(i).name().contains(" LESSTHAN ") || dataset.attribute(i).name().contains(" EQUAL ") || dataset.attribute(i).name().contains(" DIFF "))
numRelational++;
if(dataset.attribute(i).name().contains(" DIVIDE ") || dataset.attribute(i).name().contains(" SUBTRACT ") || dataset.attribute(i).name().contains(" ADD "))
numNumerical++;
}
features[0]=numLogical;
features[1]=numThr;
features[2]=numFuria;
features[3]=numCartesian;
features[4]=numRelational;
features[5]=numNumerical;
return features;
}
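/*
 * Sketch of reading the counts returned by numOfFeat (index layout fixed by the method):
 *
 *   int[] f = numOfFeat(enriched, numOfOrigAttr);
 *   //f[0]=logical, f[1]=threshold (num-of-N), f[2]=decision rule (FURIA),
 *   //f[3]=Cartesian, f[4]=relational, f[5]=numerical
 */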
public static int numOfLogFeatInTree(Instances data, int numOfOrigAttr, J48 dt) throws Exception{
int numLogical=0;
String notEscaped=dt.graph();
String tmpParse[]=notEscaped.split("\\r?\\n"); //split by new line
String attName;
for(int o=1;o<tmpParse.length-1;o++){ //first and last lines are "digraph J48Tree {" and "}"
if(tmpParse[o].contains("->") || tmpParse[o].contains("shape=box")) //skip edges and leaves; edge labels are left(0) and right(1)
continue;
attName=tmpParse[o].substring(tmpParse[o].indexOf("\"")+1,tmpParse[o].lastIndexOf("\"")); //strip the surrounding quotes
if(attName.contains(" xor ")||(attName.contains(" or ") && !attName.contains("("))||attName.contains(" equ ")||attName.contains(" impl ")||(attName.contains(" and ") && !attName.contains("(")))
numLogical++;
}
return numLogical;
}
public static int[] numOfRelFeatInTree(Instances data, int numOfOrigAttr, J48 dt) throws Exception{ //number of relational features and sum of terms in constructs
int numRelational[]=new int[2]; //0-num of features, 1-sum of terms in constructs
String notEscaped=dt.graph();
String tmpParse[]=notEscaped.split("\\r?\\n"); //split by new line
String attName;
for(int o=1;o<tmpParse.length-1;o++){ //first and last lines are "digraph J48Tree {" and "}"
if(tmpParse[o].contains("->") || tmpParse[o].contains("shape=box")) //skip edges and leaves; edge labels are left(0) and right(1)
continue;
attName=tmpParse[o].substring(tmpParse[o].indexOf("\"")+1,tmpParse[o].lastIndexOf("\"")); //strip the surrounding quotes
if(attName.contains(" LESSTHAN ") || attName.contains(" DIFF ") || attName.contains(" EQUAL ")){
numRelational[0]++;
numRelational[1]+=2; //lessthan, diff and equal are features of order 2 e.g., A1 DIFF A2
}
}
return numRelational;
}
public static int[] numOfCartFeatInTree(Instances data, int numOfOrigAttr, J48 dt) throws Exception{ //number of Cartesian features and sum of terms in constructs
int num[]=new int[2]; //0-num of features, 1-sum of terms in constructs
String notEscaped=dt.graph();
String tmpParse[]=notEscaped.split("\\r?\\n"); //split by new line
String attName;
for(int o=1;o<tmpParse.length-1;o++){ //first and last lines are "digraph J48Tree {" and "}"
if(tmpParse[o].contains("->") || tmpParse[o].contains("shape=box")) //skip edges and leaves; edge labels are left(0) and right(1)
continue;
attName=tmpParse[o].substring(tmpParse[o].indexOf("\"")+1,tmpParse[o].lastIndexOf("\"")); //strip the surrounding quotes
if(attName.contains("_x_")){
num[0]++;
num[1]+=attName.split("_x_").length; //sum of terms
}
}
return num;
}
public static int[] numOfDrThrFeatInTree(Instances data, int numOfOrigAttr, J48 dt) throws Exception{ //number of decision rule and threshold features and sum of terms in constructs in tree
int num[]=new int[4]; //0-num of feat, 1-sum of terms in constructs of decision rule (FURIA) feat, 2-num of thr feat, 3-sum of construct of thr feat
String notEscaped=dt.graph();
String tmpParse[]=notEscaped.split("\\r?\\n"); //split by new line
String attName;
for(int o=1;o<tmpParse.length-1;o++){ //first and last lines are "digraph J48Tree {" and "}"
if(tmpParse[o].contains("->") || tmpParse[o].contains("shape=box")) //skip edges and leaves; edge labels are left(0) and right(1)
continue;
attName=tmpParse[o].substring(tmpParse[o].indexOf("\"")+1,tmpParse[o].lastIndexOf("\"")); //strip the surrounding quotes
if(attName.contains("(") && !attName.contains("num-of-N((")){ //counting decision rule (FURIA) feat
num[0]++;
if(attName.contains(" and "))
num[1]+=attName.split(" and ").length;
else
num[1]++; // we need to count also features with one attribute e.g., (V16 = success)
//legacy: our own "or" combinations are no longer generated, but are still counted if present
if(attName.contains(" or ")) //this is our own combination of "or" rules, not a default FURIA rule
num[1]+=attName.split(" or ").length;
}
if(attName.contains("num-of-N((")){ //counting threshold feat
num[2]++;
if(attName.contains(" and "))
num[3]+=attName.split(" and ").length;
}
}
return num;
}
public static int[] numOfNumFeatInTree(Instances data, int numOfOrigAttr, J48 dt) throws Exception{ //number of numerical features and sum of terms in constructs in tree
int numNumerical[]=new int[2]; //0-num of feat, 1-sum of terms in constructs of numerical feat
String notEscaped=dt.graph();
String tmpParse[]=notEscaped.split("\\r?\\n"); //split by new line
String attName;
for(int o=1;o<tmpParse.length-1;o++){ //first and last lines are "digraph J48Tree {" and "}"
if(tmpParse[o].contains("->") || tmpParse[o].contains("shape=box")) //skip edges and leaves; edge labels are left(0) and right(1)
continue;
attName=tmpParse[o].substring(tmpParse[o].indexOf("\"")+1,tmpParse[o].lastIndexOf("\"")); //strip the surrounding quotes
if(attName.contains(" DIVIDE ") || attName.contains(" ADD ") || attName.contains(" SUBTRACT ")){
numNumerical[0]++;
numNumerical[1]+=2; //DIVIDE, ADD and SUBTRACT are features of order 2 e.g., A1 DIVIDE A2
}
}
return numNumerical;
}
//for the original model, the original and the enriched dataset are the same
public static int sumOfTermsInConstrInTree(Instances data, int numOfOrigAttr, J48 dt) throws Exception{ //sum of terms in constructs in tree
String notEscaped=dt.graph();
String tmpParse[]=notEscaped.split("\\r?\\n"); //split by new line
String tmpSize[];
int sumOfConstructs=0;
String attName;
for(int o=1;o<tmpParse.length-1;o++){ //first and last lines are "digraph J48Tree {" and "}"
if(tmpParse[o].contains("->") || tmpParse[o].contains("shape=box")) //skip edges and leaves; edge labels are left(0) and right(1)
continue;
attName=tmpParse[o].substring(tmpParse[o].indexOf("\"")+1,tmpParse[o].lastIndexOf("\"")); //strip the surrounding quotes
if(attName.contains(" or ")){ //binary features (or,xor,equ,impl)
tmpSize=attName.split(" or ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" impl ")){
tmpSize=attName.split(" impl ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" xor ")){
tmpSize=attName.split(" xor ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" equ ")){
tmpSize=attName.split(" equ ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" and ")){//decision rules and thr features (... and ...) and logical feat A1 and A2
tmpSize=attName.split(" and ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" LESSTHAN ")){ //A1 LESSTHAN A2
tmpSize=attName.split(" LESSTHAN ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" EQUAL ")){
tmpSize=attName.split(" EQUAL ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" DIFF ")){
tmpSize=attName.split(" DIFF ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains("_x_")){ //A1_X_A2 ... Cartesian product
tmpSize=attName.split("_x_");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" DIVIDE ")){
tmpSize=attName.split(" DIVIDE ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" ADD ")){
tmpSize=attName.split(" ADD ");
sumOfConstructs+=tmpSize.length;
}
else if(attName.contains(" SUBTRACT ")){
tmpSize=attName.split(" SUBTRACT ");
sumOfConstructs+=tmpSize.length;
}
else
sumOfConstructs++; //no construct in this node: a single attribute counts as one term
}
return sumOfConstructs;
}
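/*
 * Worked example for sumOfTermsInConstrInTree (hypothetical node labels): a node "A1 xor A2"
 * contributes 2 terms, "(A3 = 1) and (A2 = 1) and (A1 = 0)" contributes 3, and a plain
 * attribute node "A5" contributes 1; the method returns the sum over all internal nodes.
 */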
public static int sumOfLFTermsInConstrInTree(Instances data, int numOfOrigAttr, J48 dt) throws Exception{ //sum of terms in constructs of logical operator features in tree
String notEscaped=dt.graph();
String tmpParse[]=notEscaped.split("\\r?\\n"); //split by new line
String tmpSize[];
int sumOfConstructs=0;
String attName;
for(int o=1;o<tmpParse.length-1;o++){ //first and last lines are "digraph J48Tree {" and "}"
if(tmpParse[o].contains("->") || tmpParse[o].contains("shape=box")) //skip edges and leaves; edge labels are left(0) and right(1)
continue;
attName=tmpParse[o].substring(tmpParse[o].indexOf("\"")+1,tmpParse[o].lastIndexOf("\"")); //strip the surrounding quotes
if(attName.contains(" or ")){ //binary features (or,xor,equ,impl)
tmpSize=attName.split(" or ");
sumOfConstructs+=tmpSize.length;
}
if(attName.contains(" impl ")){
tmpSize=attName.split(" impl ");
sumOfConstructs+=tmpSize.length;
}
if(attName.contains(" xor ")){
tmpSize=attName.split(" xor ");
sumOfConstructs+=tmpSize.length;
}
if(attName.contains(" equ ")){
tmpSize=attName.split(" equ ");
sumOfConstructs+=tmpSize.length;
}
if(attName.contains(" and ") && !attName.contains("(")){ //decision rules and thr features (... and ...) and logical feat A1 and A2
tmpSize=attName.split(" and ");
sumOfConstructs+=tmpSize.length;
}
}
return sumOfConstructs;
}
public static int sumOfTermsInConstrInRule(ArrayList<Rule> ar1, Instances data){
int count=0;
String rule;
String tmpOuter[];
for(Rule el: ar1){
rule=((FURIA.RipperRule)el).toString(data.classAttribute());
if(rule.contains(" and ")){
tmpOuter=rule.split(" and ");
count+=tmpOuter.length;
for (int i=0;i<tmpOuter.length; i++){
if(tmpOuter[i].contains(" or ")) //if we have e.g., (A1 or A2) and (A4 or A7) then we get from and "split" 2 and then from each or "split" 1-> 2+1+1=4
count+=tmpOuter[i].split(" or ").length-1; //we have constructive induction of depth 3 for conjunction and disjunction ... -1 because we count one attribute in and split
else if(tmpOuter[i].contains(" xor "))
count++;
else if(tmpOuter[i].contains(" impl "))
count++;
else if(tmpOuter[i].contains(" equ "))
count++;
else if(tmpOuter[i].contains(" LESSTHAN "))
count++;
else if(tmpOuter[i].contains(" EQUAL "))
count++;
else if(tmpOuter[i].contains(" DIFF "))
count++;
else if(tmpOuter[i].contains("_x_"))
count++;
else if(tmpOuter[i].contains(" DIVIDE "))
count++;
else if(tmpOuter[i].contains(" ADD "))
count++;
else if(tmpOuter[i].contains(" SUBTRACT "))
count++;
}
}
else if(rule.contains(" or ")){ //our construction of or in decision rules features
tmpOuter=rule.split(" or ");
count+=tmpOuter.length;
}
else if(rule.contains(" xor ")){
tmpOuter=rule.split(" xor ");
count+=tmpOuter.length;
}
else if(rule.contains(" impl ")){
tmpOuter=rule.split(" impl ");
count+=tmpOuter.length;
}
else if(rule.contains(" equ ")){
tmpOuter=rule.split(" equ ");
count+=tmpOuter.length;
}
else if(rule.contains(" LESSTHAN ")){
tmpOuter=rule.split(" LESSTHAN ");
count+=tmpOuter.length;
}
else if(rule.contains(" EQUAL ")){
tmpOuter=rule.split(" EQUAL ");
count+=tmpOuter.length;
}
else if(rule.contains(" DIFF ")){
tmpOuter=rule.split(" DIFF ");
count+=tmpOuter.length;
}
else if(rule.contains("_x_")){ //Cartesian product
tmpOuter=rule.split("_x_");
count+=tmpOuter.length;
}
else if(rule.contains(" DIVIDE ")){
tmpOuter=rule.split(" DIVIDE ");
count+=tmpOuter.length;
}
else if(rule.contains(" ADD ")){
tmpOuter=rule.split(" ADD ");
count+=tmpOuter.length;
}
else if(rule.contains(" SUBTRACT ")){
tmpOuter=rule.split(" SUBTRACT ");
count+=tmpOuter.length;
}
else
count++;
}
return count;
}
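/*
 * Worked example for sumOfTermsInConstrInRule: for a rule antecedent
 * "(A1 or A2) and (A4 or A7)" the " and " split yields 2 terms and each nested " or "
 * adds length-1 = 1 more, so the rule counts 2 + 1 + 1 = 4 terms.
 */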
//Fayyad & Irani's MDL
public static Instances discretizeFI(Instances newData, int attrIdx, boolean kononenko) throws Exception{ //we have to prepare data without class variable
NominalToBinary nominalToBinary=null;
Remove remove= new Remove();
remove.setAttributeIndices((newData.attribute(attrIdx).index()+1)+",last");//rangeList - a string representing the list of attributes. Since the string will typically come from a user, attributes are indexed from 1. e.g., first-3,5,6-last
remove.setInvertSelection(true);
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove); //just one attribute
if(newData.attribute(0).isNominal()){ //only the selected attribute (index 0) and the class remain in the dataset
nominalToBinary = new NominalToBinary();
nominalToBinary.setAttributeIndices("first");
nominalToBinary.setInputFormat(newData);
}
if(newData.attribute(0).isNumeric()){
//discretization
weka.filters.supervised.attribute.Discretize filter; //because of the same class name in different packages
//setup filter
filter = new weka.filters.supervised.attribute.Discretize();
//Discretization is by Fayyad & Irani's MDL method (the default). Continuous attributes are discretized and binarized.
newData.setClassIndex(newData.numAttributes()-1); //we need class index for Fayyad & Irani's MDL
filter.setUseBinNumbers(true); //e.g. BXofY ... B1of1
filter.setUseKononenko(kononenko);
filter.setInputFormat(newData);
//apply filter
newData = Filter.useFilter(newData, filter);
//nominal to binary
nominalToBinary = new NominalToBinary();
nominalToBinary.setAttributeIndices("first");
nominalToBinary.setInputFormat(newData);
}
newData = Filter.useFilter(newData, nominalToBinary);
remove= new Remove();
remove.setAttributeIndices("last"); //remove the class attribute
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove);
//rename attributes, e.g., from 'A7=\'B1of3\'' to A7==B1of3
String tmp;
for (int i=0;i<newData.numAttributes();i++){
tmp=newData.attribute(i).name();
if(tmp.contains("=") || tmp.contains("'")){
tmp=tmp.replaceFirst("=","==").replaceAll("'", "");
newData.renameAttribute(i, tmp);
}
}
return newData;
}
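/*
 * Minimal usage sketch for discretizeFI (assumes the class attribute is last): the selected
 * attribute is discretized with Fayyad & Irani's MDL criterion (or Kononenko's, if the flag is
 * true) and binarized; the returned dataset contains only the generated binary attributes.
 *
 *   Instances bins = discretizeFI(data, 3, false); //attribute at index 3, Fayyad & Irani variant
 */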
//Fayyad & Irani's MDL
public static Instances discretizeFITestBasedOnTrain(Instances trainData, Instances testData, int attrIdx, boolean kononenko) throws Exception{ //we have to prepare data without class variable
Instances train=new Instances(trainData);
Instances test=new Instances(testData);
NominalToBinary nominalToBinary=null;
Remove remove= new Remove();
remove.setAttributeIndices((train.attribute(attrIdx).index()+1)+",last");//rangeList - a string representing the list of attributes. Since the string will typically come from a user, attributes are indexed from 1. e.g.: first-3,5,6-last
remove.setInvertSelection(true);
remove.setInputFormat(train);
train = Filter.useFilter(train, remove); //just one attribute
remove.setAttributeIndices((test.attribute(attrIdx).index()+1)+",last");//rangeList - a string representing the list of attributes. Since the string will typically come from a user, attributes are indexed from 1. e.g.: first-3,5,6-last
remove.setInvertSelection(true);
remove.setInputFormat(test);
test = Filter.useFilter(test, remove); //just one attribute
if(test.attribute(0).isNominal()){ //only the selected attribute (index 0) and the class remain in the dataset
nominalToBinary = new NominalToBinary();
nominalToBinary.setAttributeIndices("first");
nominalToBinary.setInputFormat(test);
}
if(test.attribute(0).isNumeric()){
//discretization
weka.filters.supervised.attribute.Discretize filter; //because of the same class name in different packages
//setup filter
filter = new weka.filters.supervised.attribute.Discretize();
//Discretization is by Fayyad & Irani's MDL method (the default). Continuous attributes are discretized and binarized.
train.setClassIndex(train.numAttributes()-1); //we need class index for Fayyad & Irani's MDL
test.setClassIndex(test.numAttributes()-1);
filter.setUseBinNumbers(true); //e.g. BXofY ... B1of1
filter.setUseKononenko(kononenko);
filter.setInputFormat(train);
//apply filter
train = Filter.useFilter(train, filter);
test = Filter.useFilter(test, filter); //we have to apply discretization on test dataset based on info from train dataset
//nominal to binary
nominalToBinary = new NominalToBinary();
nominalToBinary.setAttributeIndices("first");
nominalToBinary.setInputFormat(test);
}
test = Filter.useFilter(test, nominalToBinary);
remove= new Remove();
remove.setAttributeIndices("last"); //remove the class attribute
remove.setInputFormat(test);
test = Filter.useFilter(test, remove);
//rename attributes, e.g., from 'A7=\'B1of3\'' to A7==B1of3
String tmp;
for (int i=0;i<test.numAttributes();i++){
tmp=test.attribute(i).name();
if(tmp.contains("=") || tmp.contains("'")){
tmp=tmp.replaceFirst("=", "==").replaceAll("'", "");
test.renameAttribute(i, tmp);
}
}
return test;
}
//only for discrete attributes
public static void mdlAllAttrFeat(Instances data) throws Exception {
Map<String, Double> mapMDL=new TreeMap<>(Collections.reverseOrder());
for(int i=0;i<data.numAttributes()-1;i++){
KononenkosMDL kMDL=new KononenkosMDL(data);
mapMDL.put(data.attribute(i).name(),kMDL.kononenkosMDL(data,i)); //kononenkosMDL ... computing MDL for each attribute
}
System.out.println("Kononenko's MDL");
LinkedList<Map.Entry<String, Double>> listMDL = new LinkedList<>(mapMDL.entrySet());
Comparator<Map.Entry<String, Double>> comparator2 = Comparator.comparing(Map.Entry::getValue);
Collections.sort(listMDL, comparator2.reversed()); //if we want reversed order ... descending order
for(Map.Entry<String, Double> me : listMDL)
System.out.printf(" %4.4f %s\n",me.getValue(), me.getKey());
}
public static void mdlCORElearn(Instances data, RCaller rCaller, RCode code){ //evaluation of the whole dataset
try{
File output = new File("Rdata/dataForR.arff"); //temporary ARFF file handed to R
OutputStream out = new FileOutputStream(output);
DataSink.write(out, data);
out.close();
code.clear();
code.addRCode("library(CORElearn)");
code.addRCode("library(RWeka)");
code.addRCode("dataset <- read.arff(\"Rdata/dataForR.arff\")");
code.addRCode("estMDL <- attrEval(which(names(dataset) == names(dataset)[length(names(dataset))]), dataset, estimator=\"MDL\",outputNumericSplits=TRUE)"); //last attribute is class attribute
rCaller.setRCode(code);
rCaller.runAndReturnResultOnline("estMDL");
String tmpRcall[]=rCaller.getParser().getAsStringArray("attrEval"); //name in R "attrEval", get data from R, evaluated attributes
Map<String, Double> mapMDL=new TreeMap<>(Collections.reverseOrder());
for(int i=0;i<data.numAttributes()-1;i++)
mapMDL.put(data.attribute(i).name(),Double.parseDouble(tmpRcall[i])); //we get attribute names from Java (Instances data) and evaluation from R
LinkedList<Map.Entry<String, Double>> listMDL = new LinkedList<>(mapMDL.entrySet());
Comparator<Map.Entry<String, Double>> comparator2 = Comparator.comparing(Map.Entry::getValue);
Collections.sort(listMDL, comparator2.reversed()); //if we want reversed order ... descending order
for(Map.Entry<String, Double> me : listMDL)
if(justExplain)
attrImpListMDL_KD.printf(" %4.4f %s\n",me.getValue(), me.getKey());
else
attrImpListMDL.printf(" %4.4f %s\n",me.getValue(), me.getKey());
deleteTempRFiles(); //better than rCaller.deleteTempFiles(); deleteTempFiles() sometimes does not delete all tmp files
output.delete();
}
catch (Exception ex){
System.out.println("Error in the method mdlCORElearn");
Logger.getLogger(FeatConstr.class.getName()).log(Level.SEVERE, null, ex);
}
}
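/*
 * Sketch of the Java <-> R round trip used above (assumes the RCaller 3.x factory methods; the
 * caller owns the RCaller/RCode instances, as elsewhere in this class):
 *
 *   RCaller rCaller = RCaller.create();
 *   RCode code = RCode.create();
 *   mdlCORElearn(data, rCaller, code); //writes Rdata/dataForR.arff and runs CORElearn's attrEval
 */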
public static void reliefFcalcDistanceOnAttributes(Instances dataOrig, Instances dataAfterCI){
try{
ReliefFcalcDistOnOrigAttr evals=new ReliefFcalcDistOnOrigAttr(dataOrig, dataAfterCI); //we set original and enriched dataset with constructor, we don't need buildEvaluator method; usage ReliefFcalcDistOnOrigAttr(dataOrig, dataAfterCI)
AttributeSelection attSel = new AttributeSelection();
Ranker search = new Ranker();
attSel.setRanking(true);
attSel.setEvaluator(evals);
attSel.setSearch(search);
attSel.SelectAttributes(dataAfterCI);
if(justExplain)
attrImpListReliefF_KD.println(attSel.toResultsString().substring(attSel.toResultsString().indexOf("Ranked attributes"),attSel.toResultsString().indexOf("Selected attributes"))); //display the results from the ranking
else
attrImpListReliefF.println(attSel.toResultsString().substring(attSel.toResultsString().indexOf("Ranked attributes"),attSel.toResultsString().indexOf("Selected attributes"))); //display the results from the ranking
}
catch (Exception ex){
System.out.println("Error in the method reliefFcalcDistanceOnAttributes");
Logger.getLogger(FeatConstr.class.getName()).log(Level.SEVERE, null, ex);
}
}
public static void lowLevelReliefF(Instances data) throws Exception {
AttributeSelection attSel = new AttributeSelection();
Ranker search = new Ranker();
ReliefFAttributeEval evals = new ReliefFAttributeEval();
attSel.setRanking(true);
attSel.setEvaluator(evals);
attSel.setSearch(search);
attSel.SelectAttributes(data);
String out=attSel.toResultsString().substring(attSel.toResultsString().indexOf("Ranked attributes"),attSel.toResultsString().indexOf("Selected attributes"));
if(justExplain)
attrImpListReliefF_KD.println(out); //display the results from the ranking
else
attrImpListReliefF.println(out); //display the results from the ranking
}
public static void lowLevelInfoGain(Instances data) throws Exception {
AttributeSelection attSel = new AttributeSelection();
Ranker search = new Ranker();
InfoGainAttributeEval evals = new InfoGainAttributeEval();
attSel.setRanking(true);
attSel.setEvaluator(evals);
attSel.setSearch(search);
attSel.SelectAttributes(data);
String out=attSel.toResultsString().substring(attSel.toResultsString().indexOf("Ranked attributes"),attSel.toResultsString().indexOf("Selected attributes"));
out=out.replaceAll("[\r\n]+", "\n");
String[] lines = out.split("\\r?\\n"); //normalize line endings so the report is readable on Windows
for (String line : lines){
System.out.printf("%s \r\n",line);
logFile.printf("%s \r\n",line);
}
}
public static void lowLevelGainRatio(Instances data) throws Exception{
AttributeSelection attSel = new AttributeSelection();
Ranker search = new Ranker();
GainRatioAttributeEval evals = new GainRatioAttributeEval();
attSel.setRanking(true);
attSel.setEvaluator(evals);
attSel.setSearch(search);
attSel.SelectAttributes(data);
String out=attSel.toResultsString().substring(attSel.toResultsString().indexOf("Ranked attributes"),attSel.toResultsString().indexOf("Selected attributes"));
out=out.replaceAll("[\r\n]+", "\n");
String[] lines = out.split("\\r?\\n"); //normalize line endings so the report is readable on Windows
for (String line : lines) {
System.out.printf("%s \r\n",line);
logFile.printf("%s \r\n",line);
}
}
public static double [][] lowLevelReliefFAttrSel(Instances data) throws Exception {
AttributeSelection attSel = new AttributeSelection();
Ranker search = new Ranker();
ReliefFAttributeEval evals = new ReliefFAttributeEval();
attSel.setRanking(true);
attSel.setEvaluator(evals);
attSel.setSearch(search);
attSel.SelectAttributes(data);
double out[][]=attSel.rankedAttributes();
return out;
}
public static double [][] lowLevelInfoGainAttrSel(Instances data) throws Exception {
AttributeSelection attSel = new AttributeSelection();
Ranker search = new Ranker();
InfoGainAttributeEval evals = new InfoGainAttributeEval();
attSel.setRanking(true);
attSel.setEvaluator(evals);
attSel.setSearch(search);
attSel.SelectAttributes(data);
double out[][]=attSel.rankedAttributes();
return out;
}
public static double [][] lowLevelGainRatioAttrSel(Instances data) throws Exception {
AttributeSelection attSel = new AttributeSelection();
Ranker search = new Ranker();
GainRatioAttributeEval evals = new GainRatioAttributeEval();
attSel.setRanking(true);
attSel.setEvaluator(evals);
attSel.setSearch(search);
attSel.SelectAttributes(data);
double out[][]=attSel.rankedAttributes();
return out;
}
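/*
 * Usage sketch shared by the three rankers above: rankedAttributes() returns one row per
 * attribute, column 0 = attribute index and column 1 = merit, sorted by decreasing merit.
 *
 *   double[][] ranked = lowLevelInfoGainAttrSel(data);
 *   for (double[] r : ranked)
 *       System.out.printf("%s: %.4f%n", data.attribute((int) r[0]).name(), r[1]);
 */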
//we take whole original dataset
public static Instances justExplainAndConstructFeat(Instances dataset, Classifier predictionModel, boolean isClassification, RCaller rCaller, RCode code) throws Exception{
System.out.println("Explaining dataset, making constructs ...");
Instances trainFold = new Instances(dataset); //we use all instances for train
trainFold.setClassIndex(trainFold.numAttributes()-1);
Random rnd = new Random(1);
int minN=minNoise;
if(trainFold.classAttribute().isNumeric())
isClassification=false;
namesOfDiscAttr(trainFold); //save discretization intervals
//heuristics for class selection for explanations
//table with frequencies for each class - how many instances occur in a particular class
double [] classDistr=Arrays.stream(trainFold.attributeStats(trainFold.classIndex()).nominalCounts).asDoubleStream().toArray(); //converted to double[] because log2Multinomial expects a double array
for(int i=0;i<minIndexClassifiers(classDistr).length;i++){
if(minIndexClassifiers(classDistr)[i].v>=Math.ceil(trainFold.numInstances()*instThr/100.00)){ //we choose class to explain - class has to have at least instThr pct of whole instances
classToExplain=minIndexClassifiers(classDistr)[i].i;
break;
}
}
double allExplanations[][]=null, allWeights[][]=null;
float allExplanationsSHAP[][], allWeightsSHAP[][]=null;
List<String>impInter=null;
Set<String> attrGroups= new LinkedHashSet<>(); //we want to keep the order of insertion and we don't want duplicates so LinkedHashSet
int numClasses=1; //1 - just one iteration, we explain minority class, otherwise numClasses=classDistr.length;
if(explAllClasses)
numClasses=classDistr.length;
/*SHAP*/
if(treeSHAP){
/*XGBOOST*/
for(int c=0;c<numClasses;c++){ //we explain all classes
if(explAllClasses)
classToExplain=c;
Instances explainData=new Instances(trainFold);
RemoveWithValues filter = new RemoveWithValues();
filter.setAttributeIndex("last"); //class
filter.setNominalIndices((classToExplain+1)+""); //what we remove ... if we invert selection than we keep ... +1 indexes go from 0, we need indexes from 1 for method setNominalIndices
filter.setInvertSelection(true);
filter.setInputFormat(explainData);
explainData = Filter.useFilter(explainData, filter);
numInst=trainFold.attributeStats(trainFold.classIndex()).nominalCounts[classToExplain]; //number of instances (from the specified class) to explain //classToExplain instead of i if we explain just one class
if(numInst==0)
continue; //a class may have no instances, e.g., class autos
System.out.println("Explaining class: "+trainFold.classAttribute().value(classToExplain)+" explaining whole dataset: "+(explAllData?"YES":"NO")); //classToExplain instead of i if we explain just one class
impGroupsKD.println("Explaining class: "+trainFold.classAttribute().value(classToExplain)+" explaining whole dataset: "+(explAllData?"YES":"NO"));
DMatrix trainMat = wekaInstancesToDMatrix(trainFold);
DMatrix explainMat = wekaInstancesToDMatrix(explainData);
float tmpContrib[][];
int numOfClasses=trainFold.numClasses();
HashMap<String, Object> params = new HashMap<>();
params.put("eta", eta); //"eta": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] It is advised to have small values of eta in the range of 0.1 to 0.3 because of overfitting
params.put("max_depth", maxDepth);
params.put("silent", 1);
params.put("nthread", processors);
params.put("gamma", gamma); //"gamma": [ 0.0, 0.1, 0.2 , 0.3, 0.4 ], gamma works by regularising using "across trees" information
if(numOfClasses==2){ //for binary examples
params.put("objective", "binary:logistic"); //binary:logistic – logistic regression for binary classification, returns predicted probability (not class)
params.put("eval_metric", "error");
}
else{ //multi class problems
params.put("objective", "multi:softmax"); //multi:softprob multi:softmax
params.put("eval_metric", "merror");
params.put("num_class", (numOfClasses));
}
Map<String, DMatrix> watches = new HashMap<>();
watches.put("train", trainMat);
Booster booster = XGBoost.train(trainMat, params, numOfRounds, watches, null, null);
String evalNameTrain[]={"train"};
DMatrix [] trainMatArr={trainMat};
//the last parameter of evalSet is only an iteration index for the log; the booster returned by XGBoost.train already contains the final iteration
String accTrain=booster.evalSet(trainMatArr, evalNameTrain,0);
System.out.println("Internal (during building) accuracy of explanation model: "+(1-Double.parseDouble(accTrain.split(":")[1]))*100); //internal evaluation of the model
impGroupsKD.println("Internal (during building) accuracy of explanation model: "+(1-Double.parseDouble(accTrain.split(":")[1]))*100);
impGroupsKD.println("*********************************************************************************");
if(explAllData)
tmpContrib=booster.predictContrib(trainMat, 0); //Tree SHAP: one column per feature plus the bias in the last column, a matrix of size (nsample, nfeats + 1)
else
tmpContrib=booster.predictContrib(explainMat, 0);
trainMatArr=null;
booster.dispose();
trainMat.dispose();
explainMat.dispose();
if(numOfClasses==2){
allExplanationsSHAP=removeCol(tmpContrib, tmpContrib[0].length-1); //we remove last column, because we do not need column with bias
}
else{
int [] idxQArr=new int[trainFold.numAttributes()-1];
if(classToExplain==0)
for(int i=0;i<idxQArr.length;i++)
idxQArr[i]=i;
else{
int start=classToExplain*trainFold.numAttributes(); //each class block has numAttributes columns: numAttributes-1 contributions plus the bias
int j=0;
for(int i=start;i<start+idxQArr.length;i++){ //take the contribution columns of the explained class, without its bias column
idxQArr[j]=i;
j++;
}
}
allExplanationsSHAP= someColumns(tmpContrib, idxQArr); //we take just columns of attributes from the class that we explain
}
if(numInst<minExplInst)
minN=minMinNoise;
double noiseThr=(explainData.numInstances()*NOISE)/100.0; //the noise threshold is NOISE percent of the number of explained instances
int usedNoise=Math.max((int)Math.ceil(noiseThr),minN); //makes sense only if NOISE=0
if(!fileName.contains("justForAccurateTime")){ //skip logging during benchmarking runs (JVM warm-up, JIT optimization, etc.)
System.out.println("We remove max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(noiseThr)+ ", minNoise="+minN+" we remove groups of size "+usedNoise+". Tree SHAP num of expl. inst. "+(explAllData ? trainFold.numInstances() : numInst));
impGroupsKD.println("We remove max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(noiseThr)+ ", minNoise="+minN+" we remove groups of size "+usedNoise+". Tree SHAP num of expl. inst. "+(explAllData ? trainFold.numInstances() : numInst));
impGroupsKD.println("Lower threshold thrL: "+thrL+" upper threshold thrU: "+thrU+" with step: "+step);
}
for(double q=thrL;q<=thrU;q=q+step){
if(!fileName.contains("justForAccurateTime")){ //skip logging during benchmarking runs (JVM warm-up, JIT optimization, etc.)
impGroupsKD.println("--------------");
impGroupsKD.printf("Threshold: %2.2f\n",round(q,1));
impGroupsKD.println("--------------");
}
allWeightsSHAP=setWeights(trainFold,allExplanationsSHAP,round(q,1));
impInter=(getMostFqSubsets(allWeightsSHAP,trainFold,usedNoise));
attrGroups.addAll(impInter);
}
}//loop explain all classes SHAP
}
else{
predictionModel.buildClassifier(trainFold);
if(excludeUppers(predictionModel.getClass().getSimpleName()).equals("RF")){ //OOB error is also calculated
RandomForest rf=(RandomForest)predictionModel;
System.out.print("Internal evaluation of the model (OOB): "+(1-rf.measureOutOfBagError())*100+" ");
impGroupsKD.print("Internal evaluation of the model (OOB): "+(1-rf.measureOutOfBagError())*100+" ");
}
if(excludeUppers(predictionModel.getClass().getSimpleName()).equals("SMO")){
SMO svm=(SMO)predictionModel;
System.out.print("Kernel in SMO: "+svm.getKernel()+" ");
impGroupsKD.print("Kernel in SMO: "+svm.getKernel()+" ");
}
/*IME*/
for(int i=0;i<numClasses;i++){//we explain all classes
if(explAllClasses)
classToExplain=i;
Instances explainData=new Instances(trainFold);
RemoveWithValues filter = new RemoveWithValues();
filter.setAttributeIndex("last") ; //class
filter.setNominalIndices((classToExplain+1)+""); //what we remove ... if we invert selection than we keep ... +1 indexes go from 0, we need indexes from 1 for method setNominalIndices
filter.setInvertSelection(true);
filter.setInputFormat(explainData);
explainData = Filter.useFilter(explainData, filter);
System.out.println("IME (explanation), "+method.name()+", "+(method.name().equals("adaptiveSampling") ? "min samples: "+minS+", sum of samples: "+sumOfSmp : method.name().equals("diffSampling")?"min samples: "+minS:"N_SAMPLES: "+N_SAMPLES)+" - alg. for searching concepts: "+predictionModel.getClass().getSimpleName());
impGroupsKD.println("IME (explanation), "+method.name()+", "+(method.name().equals("adaptiveSampling") ? "min samples: "+minS+", sum of samples: "+sumOfSmp : method.name().equals("diffSampling")?"min samples: "+minS:"N_SAMPLES: "+N_SAMPLES)+" - alg. for searching concepts: "+predictionModel.getClass().getSimpleName());
System.out.println("Explaining class: "+trainFold.classAttribute().value(classToExplain)+", explaining whole dataset: "+(explAllData?"YES":"NO")); //classToExplain instead of i if we explain just one class
impGroupsKD.println("Explaining class: "+trainFold.classAttribute().value(classToExplain)+", explaining all dataset: "+(explAllData?"YES":"NO"));
System.out.println("---------------------------------------------------------------------------------");
impGroupsKD.println("---------------------------------------------------------------------------------");
switch (method){
case aproxErrSampling:
System.out.println("Sampling based on mi=(<1-alpha, e>), pctErr = "+pctErr+" error = "+error+".");
impGroupsKD.println("Sampling based on mi=(<1-alpha, e>), pctErr = "+pctErr+" error = "+error+".");
System.out.println("---------------------------------------------------------------------------------");
impGroupsKD.println("---------------------------------------------------------------------------------");
break;
}
numInst=trainFold.attributeStats(trainFold.classIndex()).nominalCounts[classToExplain]; //number of explained instances; instances from the explained class //classToExplain instead of i if we explain just one class
if(numInst==0)
continue; //a class may have no instances, e.g., class autos
if(!explAllData){
if(numInst>maxToExplain){
System.out.println("We take only "+maxToExplain+" instances out of "+numInst+".");
impGroupsKD.println("We take only "+maxToExplain+" instances out of "+numInst+".");
explainData.randomize(rnd);
explainData = new Instances(explainData, 0, maxToExplain);
numInst=explainData.attributeStats(explainData.classIndex()).nominalCounts[classToExplain]; //for correct print on output
}
switch (method){
case equalSampling:
allExplanations=IME.explainAllDatasetES(trainFold, explainData, predictionModel, N_SAMPLES, classToExplain);//equal sampling
break;
case adaptiveSamplingSS:
allExplanations=IME.explainAllDatasetAS(trainFold, explainData, predictionModel, minS, sumOfSmp, classToExplain);//we need sumOfSmp (sum of samples) for additive sampling
break;
case adaptiveSamplingAE:
allExplanations=IME.explainAllDatasetAS(trainFold, explainData, predictionModel, minS, classToExplain, error, pctErr);
break;
case aproxErrSampling:
allExplanations=IME.explainAllDatasetAES(predictionModel, trainFold, explainData, true, classToExplain, minS, error, pctErr);
break;
}
}
else{
switch (method){
case equalSampling:
allExplanations=IME.explainAllDatasetES(trainFold, trainFold, predictionModel, N_SAMPLES, classToExplain);//equal sampling
break;
case adaptiveSamplingSS:
allExplanations=IME.explainAllDatasetAS(trainFold, trainFold, predictionModel, minS, sumOfSmp, classToExplain);//we need sumOfSmp (sum of samples) for additive sampling
break;
case adaptiveSamplingAE:
allExplanations=IME.explainAllDatasetAS(trainFold, trainFold, predictionModel, minS, classToExplain, error, pctErr);
break;
case aproxErrSampling:
allExplanations=IME.explainAllDatasetAES(predictionModel, trainFold, trainFold, true, classToExplain, minS, error, pctErr);
break;
}
}
if(numInst<minExplInst)
minN=minMinNoise;
double noiseThr=(numInst*NOISE)/100.0; //the noise threshold is NOISE percent of the number of explained instances
int usedNoise=Math.max((int)Math.ceil(noiseThr),minN); //makes sense only if NOISE=0 or num of explained instances is very low
System.out.println("We remove max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(noiseThr)+ ", minNoise="+minN+" we remove groups of size "+usedNoise+". Number of instances from class ("+explainData.classAttribute().value(classToExplain)+") is "+numInst);
impGroupsKD.println("We remove max(NOISE,minNoise) groups, NOISE="+NOISE+"% -> "+(int)Math.ceil(noiseThr)+ ", minNoise="+minN+" we remove groups of size "+usedNoise+". Number of instances from class ("+explainData.classAttribute().value(classToExplain)+") is "+numInst);
impGroupsKD.println("Lower threshold thrL: "+thrL+" upper threshold thrU: "+thrU+" with step: "+step);
for(double q=thrL;q<=thrU;q=q+step){
impGroupsKD.println("--------------");
impGroupsKD.printf("Threshold: %2.2f\n",round(q,1));
impGroupsKD.println("--------------");
allWeights=setWeights(trainFold,allExplanations,round(q,1));
impInter=(getMostFqSubsets(allWeights,trainFold,usedNoise));
attrGroups.addAll(impInter);
}
} //explain both (all) classes IME
}
listOfConcepts = new ArrayList<>(attrGroups);
impGroupsKD.println("*********************************************************************************");
impGroupsKD.println("All potential concepts, based on thresholds.");
impGroupsKD.print("\t"); printFqAttrOneRow(listOfConcepts,trainFold);
impGroupsKD.println("\n*********************************************************************************");
int N2=2;
List allCombSecOrd=allCombOfOrderN(listOfConcepts,N2); //create attribute groups for second-order features
//logical features
if(logFeat){
//depth 2
for(String op : operationLogUse)
trainFold= addLogFeatDepth(trainFold, allCombSecOrd,OperationLog.valueOf(op), false, 0, N2); //0 - we don't count unInf features
//construction depth higher than 2
if(featDepth>2){
for(int i=3;i<=featDepth; i++){
List allCombNthOrd=allCombOfOrderN(listOfConcepts,i);
for(String op : operationLogUse)
if(OperationLog.valueOf(op)==OperationLog.AND || OperationLog.valueOf(op)==OperationLog.OR)
trainFold= addLogFeatDepth(trainFold, allCombNthOrd,OperationLog.valueOf(op), false, 0, i); //0 - we don't count unInf features
}
}
}
//decision rule and threshold features
if(decRuleFeat || thrFeat){
List<String> listOfFeat;
listOfFeat=genFeatFromFuria(dataset, (ArrayList<String>) listOfConcepts, classToExplain, cf, pci,covering, featFromExplClass); //generate features from Furia, parameter of FURIA cfF=0.7, cfI=0.9 stopping criteria
if(decRuleFeat)
trainFold=addFeatures(trainFold, (ArrayList<String>) listOfFeat, true); //add features from Furia
//num-of-N features ... we are counting true conditions from rules
if(thrFeat)
trainFold=addFeatNumOfN(trainFold, (ArrayList<String>) listOfFeat, true); //add num-Of-N features for evaluation
}
//numerical features
if(numerFeat){
for(String op : operationNumUse)
trainFold=addNumFeat(trainFold, OperationNum.valueOf(op), allCombSecOrd, true, rCaller, code);
}
//relational features
if(relatFeat){
for(String op : operationRelUse)
trainFold=addRelFeat(trainFold,allCombSecOrd,OperationRel.valueOf(op),true,0); //true ... we remove uninformative features, last parameter is here irrelevant, we just put one value
}
//Cartesian features
Instances origData=null;
if(cartFeat){
boolean allDiscrete=true;
for(int i=0;i<dataset.numAttributes();i++)
if(dataset.attribute(i).isNumeric()){ //check whether the dataset contains any continuous attribute
allDiscrete=false;
System.out.println("We found a continuous attribute!");
break;
}
}
origData = new Instances(dataset); //For the ReliefF evaluation we need original data. The dataset "dataset" is changed below.
if(!allDiscrete){
//discretization
weka.filters.supervised.attribute.Discretize filterDis; //because of same class name in different packages
// setup filter
filterDis = new weka.filters.supervised.attribute.Discretize();
dataset.setClassIndex(dataset.numAttributes()-1); //we need class index for Fayyad & Irani's MDL
filterDis.setInputFormat(dataset);
// apply filter
dataset = Filter.useFilter(dataset, filterDis);
}
trainFold=addCartFeat(trainFold, dataset,allCombSecOrd,false,0,N2,true);
}
//evaluation
attrImpListMDL_KD.println("MDL - after CI");
mdlCORElearn(trainFold, rCaller, code);
attrImpListReliefF_KD.println("ReliefF - after CI");
if(cartFeat)
reliefFcalcDistanceOnAttributes(origData, trainFold);
else
reliefFcalcDistanceOnAttributes(dataset, trainFold);
System.out.println("Constructs have been done!");
System.out.println("*********************************************************************************");
if(visualisation){
System.out.println("Drawing ...");
visualizeModelInstances(visualModel, trainFold, true, RESOLUTION, numOfImpt, visFrom, visTo); //visualise explanations from e.g., 50th to 60 instance
System.out.println("Drawing is finished!");
}
return trainFold;
}
//for original model origDataset and enrichedDataset are the same
public static ModelAndAcc evaluateModel(Instances train, Instances test, Classifier model) throws Exception{
ModelAndAcc ma=new ModelAndAcc();
Instances trainFold=new Instances(train);
model.buildClassifier(trainFold);
Evaluation eval = new Evaluation(trainFold);
eval.evaluateModel(model, test);
ma.setClassifier(model);
ma.setAcc((eval.correct())/(eval.incorrect()+eval.correct())*100.00); //equivalent to ma.setAcc((1.0-eval.errorRate())*100.0);
return ma;
}
public static ParamSearchEval paramSearch(Instances train, Instances test, Classifier predictionModel, int numOfAttr, int split, RCaller rCaller, RCode code) throws Exception{
Instances validation, subTrain, tmpValidation, tmpSubTrain;
ParamSearchEval pse=new ParamSearchEval();
StratifiedRemoveFolds fold;
double attrImp;
double intClassAcc=0; //internal classification accuracy - accuracy within the paramSearch loop
double maxIntAcc; //maximal internal accuracy
ArrayList <Parameters> bestRndParam;
String listOfUnInFeat;
Remove remove;
String bestParam, attName;
int feat[]=new int[6]; //for counting logical, thr, FURIA, Cartesian, relational and numerical features
int tree[]=new int[3]; //for counting tree size, number of leaves and number of constructs
int numLogInTree[]=new int[2]; //number of logical features and constructs in tree
int nC[]=new int[2]; //for counting Cartesian features in tree
int nR[]=new int[2]; //for counting relational features in tree
int nN[]=new int[2]; //for counting numerical features in tree
int complexityF[]=new int[2]; //for counting complexity parameters of Furia
int furiaThrC[]=new int[4]; //for Furia and Thr feat complexity
int tmp[];
long time[]=new long[2]; //0-feature construction time, 1-learning time
Timer t1=new Timer();
bestRndParam =new ArrayList<>();
maxIntAcc=0;
fold = new StratifiedRemoveFolds();
fold.setInputFormat(train);
fold.setSeed(1);
fold.setNumFolds(split);
fold.setFold(split);
fold.setInvertSelection(true); //because we invert selection we take all folds except the "split" one
subTrain = Filter.useFilter(train,fold);
fold = new StratifiedRemoveFolds();
fold.setInputFormat(train);
fold.setSeed(1);
fold.setNumFolds(split);
fold.setFold(split);
fold.setInvertSelection(false);
validation = Filter.useFilter(train,fold);
/*FS on validation dataset loop!!!*/
t1.start();
for(int g=0;g<attrImpThrs.length;g++){
listOfUnInFeat="";
tmpSubTrain=new Instances(subTrain);
tmpValidation=new Instances(validation);
for(int j=numOfAttr;j<tmpSubTrain.numAttributes()-1;j++){
if(!tmpSubTrain.attribute(j).isNumeric())
attrImp=calculateAttrImportance(tmpSubTrain, tmpSubTrain.attribute(j).name(), "MDL"); //faster implementation of MDL only for discrete data
else
attrImp=calcFeatImpMDL(tmpSubTrain, j, rCaller, code);
if(attrImp<=attrImpThrs[g])
listOfUnInFeat+=(j+1)+",";
}
if(!listOfUnInFeat.equals("")){
remove= new Remove();
remove.setAttributeIndices(listOfUnInFeat); //rangeList - a string representing the list of attributes. Since the string will typically come from a user, attributes are indexed from 1. e.g.: first-3,5,6-last
remove.setInputFormat(tmpSubTrain);
tmpSubTrain = Filter.useFilter(tmpSubTrain, remove);
remove.setAttributeIndices(listOfUnInFeat);
remove.setInputFormat(tmpValidation);
tmpValidation = Filter.useFilter(tmpValidation, remove);
}
predictionModel.buildClassifier(tmpSubTrain);
Evaluation eval = new Evaluation(tmpSubTrain);
eval.evaluateModel(predictionModel, tmpValidation);
intClassAcc=(eval.correct())/(eval.incorrect()+eval.correct())*100.00;
bestRndParam.add(new Parameters(intClassAcc,"MDL"+"@"+attrImpThrs[g], tmpSubTrain.numAttributes()-1 )); //new version only MDL method
if(intClassAcc>maxIntAcc)
maxIntAcc=intClassAcc;
}
deleteTempRFiles();
t1.stop();
time[0]=t1.diff();
bestParamPerFold.print("Num. of all parameters "+bestRndParam.size()+". ");
for(int j=0;j<bestRndParam.size();){
if(bestRndParam.get(j).getAcc()<maxIntAcc)
bestRndParam.remove(j);
else
j++;
}
bestParamPerFold.println("Num. of max ACC "+bestRndParam.size()+".");
if(bestRndParam.size()>1)
bestParam=bestRndParam.get((int)(Math.random()*bestRndParam.size())).getEvalMeth(); //we take a random parameter among those that have the same ACC
else
bestParam=bestRndParam.get(0).getEvalMeth();
listOfUnInFeat="";
for(int j=numOfAttr;j<train.numAttributes()-1;j++){
if(!train.attribute(j).isNumeric()) //we skip evaluation of numerical features
attrImp=calculateAttrImportance(train, train.attribute(j).name(), bestParam.split("@")[0]); //old version attribute(j) ... indexes start from 0!!!
else
attrImp=calcFeatImpMDL(train, j, rCaller, code);
if(attrImp<=Double.parseDouble(bestParam.split("@")[1]))
listOfUnInFeat+=(j+1)+",";
}
deleteTempRFiles();
remove= new Remove();
remove.setAttributeIndices(listOfUnInFeat);
remove.setInputFormat(train);
train = Filter.useFilter(train, remove);
tmp=numOfFeat(train,numOfAttr);
feat[0]+=tmp[0]; //logical feat
feat[1]+=tmp[1]; //threshold feat
feat[2]+=tmp[2]; //decision rule feat (FURIA)
feat[3]+=tmp[3]; //Cartesian feat
feat[4]+=tmp[4]; //relational feat
if(numerFeat)
feat[5]+=tmp[5]; //numerical feat
remove.setInputFormat(test);
test = Filter.useFilter(test, remove);
t1.start();
predictionModel.buildClassifier(train);
Evaluation eval = new Evaluation(train);
eval.evaluateModel(predictionModel, test);
t1.stop();
time[1]=t1.diff();
if(excludeUppers(predictionModel.getClass().getSimpleName()).equals("J48")){
J48 j48=(J48)(predictionModel);
tree[0]=(int)j48.measureTreeSize(); //treeSize
tree[1]=(int)j48.measureNumLeaves(); //numOfLeaves
tree[2]=sumOfTermsInConstrInTree(train, numOfAttr, j48); //sumOfTerms
numLogInTree[0]=numOfLogFeatInTree(train, numOfAttr, j48);
numLogInTree[1]=sumOfLFTermsInConstrInTree(train, numOfAttr, j48);
if(numerFeat)
nN=numOfNumFeatInTree(train,numOfAttr, j48);
nR=numOfRelFeatInTree(train,numOfAttr, j48);
nC=numOfCartFeatInTree(train,numOfAttr, j48);
furiaThrC=numOfDrThrFeatInTree(train, numOfAttr, j48);
}
if(excludeUppers(predictionModel.getClass().getSimpleName()).equals("FURIA")){
FURIA fu=(FURIA)(predictionModel);
complexityF[0]=fu.getRuleset().size();
complexityF[1]=sumOfTermsInConstrInRule(fu.getRuleset(),train);
}
attrImpListMDL.println("MDL (param. search)");
attrImpListMDL.println("The best parameter when using "+predictionModel.getClass().getSimpleName()+": "+bestParam); //use only features above the bestParam MDL score and all attributes
//mdlCORElearn(train, rCaller, code);
pse.setAcc((eval.correct())/(eval.incorrect()+eval.correct())*100.00);
pse.setFeat(feat);
pse.setTree(tree);
pse.setComplexityFuria(complexityF);
pse.setNumLogFeatInTree(numLogInTree);
if(numerFeat)
pse.setNumFeatInTree(nN);
pse.setRelFeatInTree(nR);
pse.setCartFeatInTree(nC);
pse.setFuriaThrComplx(furiaThrC);
pse.setTime(time);
return pse;
}
public static double[][] minMaxNumAttr(Instances data) throws Exception{
//find min and max for numeric attributes
double[][] minMaxForNumericAttributes = new double[data.numAttributes()][2];
for(int i = 0; i < data.numAttributes(); i++){
minMaxForNumericAttributes[i][0] = Double.MAX_VALUE;
minMaxForNumericAttributes[i][1] = -Double.MAX_VALUE;
}
//get range for numeric attributes
Instance tempInst;
for(int i = 0; i < data.numInstances(); i++){
tempInst = data.instance(i);
for(int j = 0; j < data.numAttributes(); j++){
Attribute tempAttr = data.attribute(j);
if(tempAttr.isNumeric()){
if(tempInst.value(j) < minMaxForNumericAttributes[j][0]) minMaxForNumericAttributes[j][0] = tempInst.value(j);
if(tempInst.value(j) > minMaxForNumericAttributes[j][1]) minMaxForNumericAttributes[j][1] = tempInst.value(j);
}
}
}
return minMaxForNumericAttributes;
}
public static float[][] setWeights(Instances data, float allExplanations[][], double THR){ //returns weights based on explanations ... SHAP returns float values
float allWeights[][]=new float[allExplanations.length][allExplanations[0].length];
float absExplanations[][]=clone2DArray(allExplanations);
//make all explanations positive
for(int i=0;i<absExplanations.length;i++){
for(int j=0;j<absExplanations[i].length;j++){
absExplanations[i][j]=Math.abs(absExplanations[i][j]);
}
}
float tmpArr1D[];
double thrTmp;
float rowSum;
for(int i=0;i<absExplanations.length;i++){
thrTmp=0;
tmpArr1D=absExplanations[i].clone();
rowSum=sumArr(tmpArr1D);
for(int a = 0; a < tmpArr1D.length; a++)
tmpArr1D[a] = tmpArr1D[a]/rowSum;
Map<String, Float> mapAttrWeights=new TreeMap<>();
for(int j=0;j<tmpArr1D.length;j++)
mapAttrWeights.put(data.attribute(j).name(),tmpArr1D[j]);
LinkedList<Map.Entry<String, Float>> listAttrWeights= new LinkedList<>(mapAttrWeights.entrySet());
Comparator<Map.Entry<String, Float>> comparator = Comparator.comparing(Map.Entry::getValue);
Collections.sort(listAttrWeights, comparator.reversed()); //if we want reversed order ... descending order
int attrIdx;
for(Map.Entry<String, Float> me : listAttrWeights){
if(thrTmp<THR){
if(me.getValue()!=0){ //if we add 0 to the sum nothing will happen
thrTmp+=me.getValue();
attrIdx=data.attribute(me.getKey()).index();
allWeights[i][attrIdx]=1;
}
} //we are adding to the selection from largest to smallest
else
break;
}
}
return allWeights;
}
public static double[][] setWeights(Instances data, double allExplanations[][], double THR){ //returns weights based on explanations ... IME returns double values
double allWeights[][]=new double[allExplanations.length][allExplanations[0].length];
double absExplanations[][]=clone2DArray(allExplanations);
//make all explanations positive
for(int i=0;i<absExplanations.length;i++){
for(int j=0;j<absExplanations[i].length;j++){
absExplanations[i][j]=Math.abs(absExplanations[i][j]);
}
}
double tmpArr1D[];
double thrTmp;
double rowSum;
for(int i=0;i<absExplanations.length;i++){
thrTmp=0;
tmpArr1D=absExplanations[i].clone();
rowSum=sumArr(tmpArr1D);
for(int a = 0; a < tmpArr1D.length; a++)
tmpArr1D[a] = tmpArr1D[a]/rowSum;
Map<String, Double> mapAttrWeights=new TreeMap<>();
for(int j=0;j<tmpArr1D.length;j++)
mapAttrWeights.put(data.attribute(j).name(),tmpArr1D[j]);
LinkedList<Map.Entry<String, Double>> listAttrWeights= new LinkedList<>(mapAttrWeights.entrySet());
Comparator<Map.Entry<String, Double>> comparator = Comparator.comparing(Map.Entry::getValue);
Collections.sort(listAttrWeights, comparator.reversed()); //if we want reversed order ... descending order
int attrIdx;
for(Map.Entry<String, Double> me : listAttrWeights){
if(thrTmp<THR){
if(me.getValue()!=0){ //if we add 0 to the sum nothing will happen
thrTmp+=me.getValue();
attrIdx=data.attribute(me.getKey()).index();
allWeights[i][attrIdx]=1;
}
} //we are adding to the selection from largest to smallest
else
break;
}
}
return allWeights;
}
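/*
Worked example of the greedy selection above (made-up values): suppose one row of normalized absolute
explanations is [0.50, 0.30, 0.15, 0.05] and THR=0.7. Attributes are visited in descending order:
0.50 is kept (cumulative 0.50<0.7), 0.30 is kept (cumulative reaches 0.80), and the rest are skipped;
the resulting weight row is [1, 1, 0, 0].
*/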
public static double computeNumOperation(double leftOperand, OperationNum op, double rightOperand) {
switch(op){
case ADD:
return leftOperand + rightOperand;
case SUBTRACT:
return leftOperand - rightOperand;
case DIVIDE:
if(rightOperand!=0)
return leftOperand / rightOperand;
else
return 0;
case ABSDIFF:
return Math.abs(leftOperand - rightOperand);
}
return 0;
}
public static int computeOperationTwoOperand(int leftOperand, OperationLog op, int rightOperand) {
switch(op){
case AND:
return ((leftOperand==1) && (rightOperand==1)) ? 1 :0;
case OR:
return ((leftOperand==0) && (rightOperand==0)) ? 0 : 1;
case EQU:
return (leftOperand == rightOperand) ? 1 : 0;
case XOR:
return ((leftOperand==0) && (rightOperand==0) || (leftOperand==1) && (rightOperand==1)) ? 0 : 1;
case IMPL:
return ((leftOperand==1) && (rightOperand==0)) ? 0 : 1;
}
return 0;
}
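/*
Reference truth table for the operators above (operands are 0/1 indicator values):
A B | AND OR EQU XOR IMPL
0 0 |  0   0   1   0    1
0 1 |  0   1   0   1    1
1 0 |  0   1   0   1    0
1 1 |  1   1   1   0    1
*/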
//compute relation operation
public static int computeRelOpTwoOperand(double leftOperand, OperationRel op, double rightOperand) {
switch(op){
case LESSTHAN:
return (leftOperand < rightOperand) ? 1 : 0;
case DIFF:
return (leftOperand != rightOperand) ? 1 : 0;
}
return 0;
}
//for Cartesian product
public static String mergeValues(String leftOperand, String rightOperand) {
return leftOperand+"_x_"+rightOperand;
}
//Cartesian product - merge all values for combination of attr1 and attr2
public static String genDiscValues(Instances data, int idx1, int idx2){
String allValues="";
String tmpAttr1,tmpAttr2;
Enumeration<Object> attr1Val=null;
Enumeration<Object> attr2Val=null;
attr1Val= data.attribute(idx1).enumerateValues();
attr2Val= data.attribute(idx2).enumerateValues();
while (attr1Val.hasMoreElements()){
tmpAttr1=(String)attr1Val.nextElement();
while (attr2Val.hasMoreElements()){
tmpAttr2=(String)attr2Val.nextElement();
allValues+=tmpAttr1+"_x_"+tmpAttr2+",";
}
attr2Val= data.attribute(idx2).enumerateValues();
}
allValues=allValues.substring(0, allValues.lastIndexOf(','));
return allValues;
}
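//Illustrative example (hypothetical attribute values): for attr1 with values {low,high} and attr2 with values
//{red,blue}, genDiscValues returns "low_x_red,low_x_blue,high_x_red,high_x_blue"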
public static String excludeUppers(String str){
String strNew="";
for(int i=0; i<str.length(); i++){
if(Character.isUpperCase(str.charAt(i)) || Character.isDigit(str.charAt(i)))
strNew+=str.charAt(i);
}
return strNew;
}
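//e.g., excludeUppers("RandomForest") returns "RF" and excludeUppers("J48") returns "J48" (uppercase letters and digits are kept)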
//returns an array of (index, value) pairs sorted ascending by value; element [0] holds the index of the minimum
public static IndexValue[] minIndexClassifiers(double [] arr){
IndexValue[] array = new IndexValue[arr.length];
//Fill the array
for( int i = 0 ; i < arr.length; i++ )
array[i] = new IndexValue(i, arr[i]);
//Sort it
Arrays.sort(array, new Comparator<IndexValue>(){
@Override
public int compare(IndexValue a, IndexValue b){
return Double.compare(a.v , b.v);
}
});
return array;
}
public static double[][] threeDtoTwoD(double [][][] arr, int idx){
double tmp[][]=new double[arr.length][arr[0].length];
for(int i=0;i<arr.length;i++)
for(int j=0;j<arr[i].length;j++)
tmp[i][j]=arr[i][j][idx];
return tmp;
}
public static Map<String, Integer> sortByValue(Map<String, Integer> unsortMap) {
List<Map.Entry<String, Integer>> list =
new LinkedList<>(unsortMap.entrySet());
Collections.sort(list, new Comparator<Map.Entry<String, Integer>>() {
@Override
public int compare(Map.Entry<String, Integer> o1,
Map.Entry<String, Integer> o2) {
return (o2.getValue()).compareTo(o1.getValue());
}
});
Map<String, Integer> sortedMap = new LinkedHashMap<>();
for(Map.Entry<String, Integer> entry : list)
sortedMap.put(entry.getKey(), entry.getValue());
return sortedMap;
}
public static ArrayList<String> getMostFqSubsets(double allWeights[][], Instances data,int pctVal) { //delete all subsets that are represented in fewer than n% of the explained cases
String attrSets []=new String[allWeights.length];
String tmpSet="";
int x=0,count=0;
for(int i=0;i<allWeights.length;i++){
for(int j=0;j<allWeights[i].length;j++){
if(allWeights[i][j]==1){
if(count==0)
tmpSet=""+j;
else
tmpSet+=","+j; //indexes
count++;
}
}
attrSets[x]=tmpSet;
x++;
tmpSet="";
count=0;
}
if (ArrayUtils.contains( attrSets, "" )){
List<String> list = new ArrayList<>(Arrays.asList(attrSets));
list.removeAll(Collections.singleton(""));
attrSets=list.toArray(new String[list.size()]);
}
Map<String, Integer> sortedMap = sortByValue(computeWordFrequencyMap(attrSets)); //computeWordFrequencyMap(words) returns frequencyMap
sortedMap.entrySet().removeIf(entry -> entry.getValue() <= pctVal); //delete all subsets whose frequency is less than or equal to pctVal
printMap2(sortedMap, data); //groups of attributes and their frequencies
ArrayList<String> result2 = new ArrayList(sortedMap.keySet()); //sortedMap.keySet() ... attribute indexes (frequent attributes), sortedMap.values() ... occurrence frequencies
List removedList = new ArrayList();
for(String temp : result2){
if(temp.split(",").length<2)
removedList.add(temp);
}
result2.removeAll(removedList);
return result2;
}
public static ArrayList<String> getMostFqSubsets(float allWeights[][], Instances data, int pctVal){ //delete all subsets that are represented in fewer than n% of the explained cases ... for the SHAP version
String attrSets []=new String[allWeights.length];
String tmpSet="";
int x=0,count=0;
for(int i=0;i<allWeights.length;i++){
for(int j=0;j<allWeights[i].length;j++){
if(allWeights[i][j]==1){
if(count==0)
tmpSet=""+j;
else
tmpSet+=","+j; //indexes
count++;
}
}
attrSets[x]=tmpSet;
x++;
tmpSet="";
count=0;
}
if (ArrayUtils.contains( attrSets, "" )){
List<String> list = new ArrayList<String>(Arrays.asList(attrSets));
list.removeAll(Collections.singleton(""));
attrSets=list.toArray(new String[list.size()]);
}
Map<String, Integer> sortedMap = sortByValue(computeWordFrequencyMap(attrSets)); //computeWordFrequencyMap(words) returns frequencyMap
sortedMap.entrySet().removeIf(entry -> entry.getValue() <= pctVal); //delete all subsets whose frequency is less than or equal to pctVal
printMap2(sortedMap, data); //groups of attributes and their frequencies
ArrayList<String> result2 = new ArrayList(sortedMap.keySet()); //sortedMap.keySet() ... attribute indexes (frequent attributes), sortedMap.values() ... occurrence frequencies
List removedList = new ArrayList();
for(String temp : result2){
if(temp.split(",").length<2)
removedList.add(temp);
}
result2.removeAll(removedList);
return result2;
}
public static Map<String, Integer> computeWordFrequencyMap(String[] words) {
Map<String, Integer> result = new HashMap<>(words.length);
for(String word : words)
result.put(word, result.getOrDefault(word, 0) + 1);
return result;
}
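//e.g., computeWordFrequencyMap(new String[]{"1,3","1,3","2"}) maps "1,3" to 2 and "2" to 1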
public static double[][] absArray(double arr [][]){
for(int i=0; i<arr.length;i++)
for(int j=0;j<arr[i].length;j++)
arr[i][j]=Math.abs(arr[i][j]);
return arr;
}
public static boolean isEmpty(String arr []){
for(String x:arr)
if(!x.isEmpty())
return false;
return true;
}
public static double[][] clone2DArray(double[][] a) {
double[][] b = new double[a.length][];
for(int i = 0; i < a.length; i++) {
b[i] = new double[a[i].length];
for(int j = 0; j < a[i].length; j++)
b[i][j] = a[i][j];
}
return b;
}
public static float[][] clone2DArray(float[][] a) {
float[][] b = new float[a.length][];
for(int i = 0; i < a.length; i++){
b[i] = new float[a[i].length];
for(int j = 0; j < a[i].length; j++)
b[i][j] = a[i][j];
}
return b;
}
public static double calculateAttrImportance(Instances data, String attName, String evaluationAlg) throws Exception{
double idxTab[][]=null;
Instances newData=new Instances(data);
Remove remove= new Remove();
remove.setAttributeIndices((newData.attribute(attName).index()+1)+",last");
remove.setInvertSelection(true);
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove); //just one attribute and class
KononenkosMDL kMDL=new KononenkosMDL();
switch(evaluationAlg){
case "ReliefF":
idxTab=lowLevelReliefFAttrSel(newData); //evaluation of added feature - dataset consists of only added feature and class
break;
case "GainRatio":
idxTab=lowLevelGainRatioAttrSel(newData);
break;
case "InfoGain":
idxTab=lowLevelInfoGainAttrSel(newData);
break;
case "MDL":
return kMDL.kononenkosMDL(newData,newData.attribute(attName).index());
case "NoEvaluation":
return 999; //return a single "high" value (more than 1) - this means we take all attributes
default:
System.out.println("Wrong evaluation method!");
}
return idxTab[0][1]; //importance of one feature
}
public static double calcFeatImpMDL(Instances data, int j, RCaller rCaller, RCode code) throws Exception{ //evaluation of one feature
Instances newData=new Instances(data);
Remove remove= new Remove();
remove.setAttributeIndices((newData.attribute(data.attribute(j).name()).index()+1)+",last");
remove.setInvertSelection(true);
remove.setInputFormat(newData);
newData = Filter.useFilter(newData, remove); //just one attribute and class
File output;
OutputStream out;
double featEval=-999;
try{
output = new File("Rdata/dataForROneAttr.arff");// <--- This is the result file
out = new FileOutputStream(output);
DataSink.write(out, newData);
out.close();
code.clear();
/**********************************R code************************************************************************************/
code.addRCode("library(CORElearn)");
code.addRCode("library(RWeka)");
code.addRCode("dataset <- read.arff(\"Rdata/dataForROneAttr.arff\")");
code.addRCode("estMDL <- attrEval(which(names(dataset) == names(dataset)[length(names(dataset))]), dataset, estimator=\"MDL\",outputNumericSplits=TRUE)"); //last attribute is class attribute
rCaller.setRCode(code);
rCaller.runAndReturnResultOnline("estMDL"); //When you are done with this process, you must explicitly stop it!
String tmpRcall[]=rCaller.getParser().getAsStringArray("attrEval"); //name in R "attrEval", get data from R, evaluated attributes
featEval=Double.parseDouble(tmpRcall[0]);
//deleteTempRFiles(); is performed after FS on validation set and after taking features for training set
output.delete(); //delete temp file
}
catch(Exception ex){
System.out.println("Error in the method mdlCORElearn");
Logger.getLogger(FeatConstr.class.getName()).log(Level.SEVERE, null, ex);
}
return featEval; //importance of one feature
}
public static DMatrix wekaInstancesToDMatrix(Instances insts) throws XGBoostError {
int numRows = insts.numInstances();
int numCols = insts.numAttributes()-1;
float[] data = new float[numRows*numCols];
float[] labels = new float[numRows];
int ind = 0;
for (int i = 0; i < numRows; i++){
for (int j = 0; j < numCols; j++)
data[ind++] = (float) insts.instance(i).value(j);
labels[i] = (float) insts.instance(i).classValue();
}
DMatrix dmat = new DMatrix(data, numRows, numCols);
dmat.setLabel(labels);
return dmat;
}
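//The matrix is flattened row-major: data[i*numCols + j] holds the value of attribute j for instance i;
//the class attribute (assumed to be the last one) is excluded from the matrix and passed separately as the label vector.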
public static float[][] removeCol(float [][] array, int colRemove){ //remove column
int row = array.length;
int col = array[0].length;
float [][] newArray = new float[row][col-1];
for(int i = 0; i < row; i++){
for(int j = 0; j < colRemove; j++){
newArray[i][j] = array[i][j];
}
for(int j = colRemove; j < col-1; j++){
newArray[i][j] = array[i][j+1];
}
}
return newArray;
}
public static float[][] someColumns(float origTab[][], int [] selectedColumns){
float newArray[][]=new float[origTab.length][selectedColumns.length];
for(int i=0;i<origTab.length;i++){
for(int j=0;j<selectedColumns.length;j++)
newArray[i][j] = origTab[i][selectedColumns[j]];
}
return newArray;
}
public static void namesOfDiscAttr(Instances trainData){
Instances newData;
NominalToBinary nominalToBinary = new NominalToBinary();
weka.filters.supervised.attribute.Discretize discFilter; //because of the same class name in different packages
discFilter = new weka.filters.supervised.attribute.Discretize();
Remove remove= new Remove();
String indices="";
boolean allDiscrete=true;
try{
for(int i=0;i<trainData.numAttributes()-1;i++)
if(trainData.attribute(i).isNumeric()){
allDiscrete=false;
break;
}
if(!allDiscrete){
//get indices of numeric attributes, we will discretize only numeric attributes
for(int i=0;i<trainData.numAttributes()-1;i++)
if(trainData.attribute(i).isNumeric())
indices+=(i+1)+",";
remove.setAttributeIndices(indices+"last"); //indices already ends with a comma
remove.setInvertSelection(true);
remove.setInputFormat(trainData);
trainData = Filter.useFilter(trainData, remove);
trainData.setClassIndex(trainData.numAttributes()-1); //we need class index for Fayyad & Irani's MDL
discFilter.setInputFormat(trainData);
newData=Filter.useFilter(trainData, discFilter);
nominalToBinary.setInputFormat(newData);
newData = Filter.useFilter(newData, nominalToBinary);
for(int i=0;i<newData.numAttributes()-1;i++)
if(justExplain)
discIntervalsKD.println(newData.attribute(i).name());
else
discIntervals.println(newData.attribute(i).name());
}
else
if(justExplain)
discIntervalsKD.println("No numeric attributes.");
else
discIntervals.println("No numeric attributes.");
}
catch(Exception e) {
System.out.println("ERROR in method namesOfDiscAttr"+e.toString());
}
}
public static List interInfoJakulin(Instances data, List allCombSecOrd, int N) throws Exception{
String tmpArr[];
int idx1, idx2;
int idx3=data.numAttributes()-1; //class attribute
double a,b,c,ab,ac,bc,abc;
double intInf;
Map<String, Double> mapIG=new TreeMap<>(Collections.reverseOrder());
double arr1Doub[];
double arr2Doub[];
double arr3Doub[];
Enumeration<Object> attr1Val=null;
Enumeration<Object> attr2Val=null;
Enumeration<Object> attr3Val=null;
//get all values from the class
attr3Val= data.attribute(idx3).enumerateValues();
//get frequencies for class
arr3Doub=getFrequencies(data, attr3Val, idx3); //class
for(int i=0;i<allCombSecOrd.size();i++){
tmpArr=allCombSecOrd.get(i).toString().replace("[","").replace("]", "").trim().split(",");
idx1=Integer.parseInt(tmpArr[0].trim());
idx2=Integer.parseInt(tmpArr[1].trim());
//get all possible values of the attribute - enumerate
attr1Val= data.attribute(idx1).enumerateValues();
attr2Val= data.attribute(idx2).enumerateValues();
//get frequencies for attribute pairs
arr1Doub=getFrequencies(data, attr1Val, idx1);
arr2Doub=getFrequencies(data, attr2Val, idx2);
a=ContingencyTables.entropy(arr1Doub);
b=ContingencyTables.entropy(arr2Doub);
c=ContingencyTables.entropy(arr3Doub);
ab=entropy2Attr(data,idx1,idx2);
bc=entropy2Attr(data,idx2,idx3);
ac=entropy2Attr(data,idx1,idx3);
abc=entropy3Attr(data,idx1,idx2,idx3);
//interaction information by Jakulin
intInf=ab+bc+ac-a-b-c-abc;
mapIG.put("["+idx1+","+idx2+"]",intInf);
}
mapIG=orderMapByDescValue(mapIG);
ArrayList<String> result2 = new ArrayList(mapIG.keySet());
List<String> firstNElementsList = result2.stream().limit(result2.size()< N ? result2.size() : N).collect(Collectors.toList());
return firstNElementsList;
}
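/*
The quantity computed above is Jakulin's interaction information, expressed with entropies as
I(A;B;C) = H(A,B)+H(A,C)+H(B,C) - H(A)-H(B)-H(C) - H(A,B,C)
where C is the class attribute; a positive value indicates that attributes A and B carry synergistic
information about the class, which makes the pair a candidate for feature construction.
*/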
public static double entropy2Attr(Instances data, int idx1, int idx2){
String arr1[]=null, arr2[]=null;
String tmpAttr1="",tmpAttr2="";
Enumeration<Object> attr1Val=null;
Enumeration<Object> attr2Val=null;
attr1Val= data.attribute(idx1).enumerateValues();
attr2Val= data.attribute(idx2).enumerateValues();
//get value names for attr1
while (attr1Val.hasMoreElements())
tmpAttr1+=(String)attr1Val.nextElement()+",";
arr1=tmpAttr1.split(",");
//get value names for attr2
while (attr2Val.hasMoreElements())
tmpAttr2+=(String)attr2Val.nextElement()+",";
arr2=tmpAttr2.split(",");
double contingTab2[][]=new double[arr1.length][arr2.length];
int idxI=0,idxJ=0;
for(int i=0;i<data.numInstances();i++){
String tmp1=data.instance(i).stringValue(idx1);
String tmp2=data.instance(i).stringValue(idx2);
for(int j=0;j<arr1.length;j++){
if(arr1[j].equals(tmp1)){
idxI=j;
break;
}
}
for(int k=0;k<arr2.length;k++){
if(arr2[k].equals(tmp2)){
idxJ=k;
break;
}
}
contingTab2[idxI][idxJ]++;
}
return ContingencyTables.entropy(flatten2dTo1d(contingTab2));
}
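//entropy2Attr computes the joint entropy H(A,B) = -sum_{a,b} p(a,b)*log2(p(a,b)); the joint counts are collected
//in a contingency table and passed (flattened) to ContingencyTables.entropy, which accepts raw counts.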
public static double entropy3Attr(Instances data, int idx1, int idx2, int idx3){
String arr1[]=null, arr2[]=null, arr3[]=null;
String tmpAttr1="",tmpAttr2="",tmpAttr3="";
Enumeration<Object> attr1Val=null;
Enumeration<Object> attr2Val=null;
Enumeration<Object> attr3Val=null;
attr1Val= data.attribute(idx1).enumerateValues();
attr2Val= data.attribute(idx2).enumerateValues();
attr3Val= data.attribute(idx3).enumerateValues();
//get value names for attr1
while (attr1Val.hasMoreElements())
tmpAttr1+=(String)attr1Val.nextElement()+",";
arr1=tmpAttr1.split(",");
//get value names for attr2
while (attr2Val.hasMoreElements())
tmpAttr2+=(String)attr2Val.nextElement()+",";
arr2=tmpAttr2.split(",");
//get value names for attr3
while (attr3Val.hasMoreElements())
tmpAttr3+=(String)attr3Val.nextElement()+",";
arr3=tmpAttr3.split(",");
double contingTab3[][][]=new double[arr1.length][arr2.length][arr3.length];
int idxI=0, idxJ=0, idxK=0;
String tmp1, tmp2, tmp3;
for(int i=0;i<data.numInstances();i++){
tmp1=data.instance(i).stringValue(idx1);
tmp2=data.instance(i).stringValue(idx2);
tmp3=data.instance(i).stringValue(idx3);
for(int j=0;j<arr1.length;j++){
if(arr1[j].equals(tmp1)){
idxI=j;
break;
}
}
for(int k=0;k<arr2.length;k++){
if(arr2[k].equals(tmp2)){
idxJ=k;
break;
}
}
for(int l=0;l<arr3.length;l++){
if(arr3[l].equals(tmp3)){
idxK=l;
break;
}
}
contingTab3[idxI][idxJ][idxK]++;
}
return ContingencyTables.entropy(flatten3dTo1d(contingTab3));
}
//for entropy - Jakulin's method
public static double[] getFrequencies(Instances data, Enumeration<Object> attrVal, int idx){
String tmpAttr="";
String arr[]=null;
double arrDoub[];
while (attrVal.hasMoreElements())
tmpAttr+=(String)attrVal.nextElement()+",";
arr=tmpAttr.split(",");
arrDoub=new double[arr.length];
for(int i=0;i<data.numInstances();i++){
String tmp=data.instance(i).stringValue(idx);
for(int j=0;j<arr.length;j++)
if(tmp.equals(arr[j]))
arrDoub[j]++;
}
return arrDoub;
}
//for entropy - Jakulin's method
public static double[] flatten2dTo1d(double tab[][]){
double arr[]=new double[tab.length*tab[0].length];
int x=0;
for(int i=0;i<tab.length;i++){
for(int j=0;j<tab[i].length;j++){
arr[x]=tab[i][j];
x++;
}
}
return arr;
}
//for entropy - Jakulin's method
public static double[] flatten3dTo1d(double tab[][][]){
double arr[]=new double[tab.length*tab[0].length*tab[0][0].length];
int x=0;
for(int i=0;i<tab.length;i++){
for(int j=0;j<tab[i].length;j++){
for(int k=0;k<tab[i][j].length;k++){
arr[x]=tab[i][j][k];
x++;
}
}
}
return arr;
}
//for Jakulin's method
public static Map<String, Double> orderMapByDescValue(Map<String, Double> unorderedMap) {
return unorderedMap.entrySet().stream()
.sorted(Collections.reverseOrder(Map.Entry.comparingByValue()))
.collect(Collectors.toMap(
Map.Entry::getKey,
Map.Entry::getValue,
(x, y) -> { throw new IllegalStateException("Unexpected merge request"); },
LinkedHashMap::new));
}
//set dots for psi, calculate attr. importance and get indexes of n important features, for all features set numOfImpt=Integer.MAX_VALUE
public static void setDotsAndLine(Classifier predictionModel, Instances data,int m, boolean isClassification, int RESOLUTION, double minMaxForNumericAttributes[][], int numOfImpt) throws Exception{ //classIndex - 0,1 ... e.g.: {no-recurrence-events,recurrence-events}, we have two indexes no-recurrence-events ... 0, recurrence-events ... 1
dotsA = new ArrayList[data.numAttributes() -1];
dotsB = new ArrayList[data.numAttributes() -1];
ArrayList<Double[]> attrImp = new ArrayList<>();
Map<String, Double> sortedMap=new TreeMap<>();
//explain attributes' values
for (int i = 0; i < data.numAttributes() - 1; i++){
dotsA[i] = new ArrayList();
dotsB[i] = new ArrayList();
Attribute tempAttr = data.attribute(i);
//for nominal attribute we calculate contributions of all its values
if (tempAttr.isNominal()){
for (int j = 0; j < tempAttr.numValues(); j++){
double tempValue = j;
double[] psi = IME.explainValueAttrImp(predictionModel, data, i, j, m, true, minMaxForNumericAttributes,isClassification);
dotsA[i].add(tempValue);
dotsA[i].add(mean(psi)); //see Formula 14 in Štrumbelj, E., & Kononenko, I. (2014). Explaining prediction models and individual predictions with feature contributions. Knowledge and information systems, 41(3), 647-665.
attrImp.add(ArrayUtils.toObject(psi));
}
}
//for a numerical attribute, we calculate contributions of values between the min and max value;
//the sampled values are evenly spaced, and their number depends on the resolution we want for the visualisation
if (tempAttr.isNumeric()){
for (int j = 0; j < RESOLUTION; j++){
double tempValue = minMaxForNumericAttributes[i][0] + j * ((minMaxForNumericAttributes[i][1] - minMaxForNumericAttributes[i][0]) / (double)RESOLUTION);
double[] psi = IME.explainValueAttrImp(predictionModel, data, i, tempValue, m, false, minMaxForNumericAttributes, isClassification);
dotsA[i].add(tempValue);
dotsA[i].add(mean(psi)); //see Formula 14 in Štrumbelj, E., & Kononenko, I. (2014). Explaining prediction models and individual predictions with feature contributions. Knowledge and information systems, 41(3), 647-665.
attrImp.add(ArrayUtils.toObject(psi));
}
}
double imp=Math.sqrt(varArrList(attrImp,meanArrList(attrImp))); //stdev of the contributions = importance of the attribute
dotsB[i].add(imp);
sortedMap.put(tempAttr.name(),imp);
attrImp.clear();
}
//print sorted map
LinkedList<Map.Entry<String, Double>> list= new LinkedList<>(sortedMap.entrySet());
Comparator<Map.Entry<String, Double>> comparator = Comparator.comparing(Map.Entry::getValue);
Collections.sort(list, comparator.reversed()); //if we want reversed order ... descending order
for(Map.Entry<String, Double> me : list){
System.out.printf(" %4.4f %s\n",me.getValue(), me.getKey());
// logFile.printf(" %4.4f %s\n",me.getValue(), me.getKey());
}
//get attribute importance
double attrImpVal[]=new double[data.numAttributes()-1];
for(int i=0; i<data.numAttributes()-1;i++)
attrImpVal[i]=dotsB[i].get(0);
double attrImpCp[]=attrImpVal.clone();
Arrays.sort(attrImpVal);
//get id of attributes that are above nth attr
if(attrImpVal.length>numOfImpt){
nThHigh="";
for(int i=0; i<attrImpCp.length;i++){
if(attrImpCp[i]>=attrImpVal[attrImpVal.length-numOfImpt] && attrImpCp[i]>0){ //attributes with zero importance are not drawn, even if they rank among the numOfImpt most important
nThHigh+=(i+1)+",";
}
}
if(nThHigh.contains(","))
nThHigh=nThHigh.substring(0, nThHigh.lastIndexOf(','));
}
}
public static void visualizeModelInstances(Classifier predictionModel, Instances data, boolean isClassification, int RESOLUTION, int numOfImpt, int fromInst, int toInst) throws Exception{
predictionModel.buildClassifier(data);
System.out.println("Attribute importance using explanation method IME, prediction alg. is "+predictionModel.getClass().getSimpleName());
boolean visExplFC; //visualisation before or after FC
String modelName=predictionModel.getClass().getSimpleName();
String classValueName=(new Instances(data,0,1)).instance(0).classAttribute().value(classToExplain); // get class name of the explained class
//visualise model
String outputDir, fName, format;
datasetName=fileName;
if(datasetName.contains(".arff") && datasetName.contains("LFeat-"))
datasetName=datasetName.substring(0, datasetName.indexOf("LFeat-")+"LFeat-".length()-1);
if(datasetName.contains(".arff") && !datasetName.contains("LFeat-"))
datasetName=datasetName.substring(0, datasetName.indexOf(".arff"));
format=".eps";
if(isClassification)
fName=modelName + "_" + datasetName + "_model-class_"+classValueName;
else
fName=modelName + "_" + datasetName + "_model-regr";
/***************************************calculate attribute importance of the changed dataset - added features***************************************/
setDotsAndLine(predictionModel, data,N_SAMPLES,isClassification,RESOLUTION, minMaxNumAttr(data),numOfImpt); //sets also parameter nThHigh ... indexes for attributes with high importance
visExplFC = justExplain && visualisation;
outputDir = visExplFC ? "visualisation/afterFC/eps/" : "visualisation/beforeFC/eps/";
//draw only numOfImpt or less most informative attributes in the model
/*Model visualisation*/
if(data.numAttributes()-1 > numOfImpt){
Remove remove= new Remove();
Instances dataCp=new Instances(data);
String attr=nThHigh;
attr=attr+","+dataCp.numAttributes(); //+1 ... class
remove.setAttributeIndices(attr);
remove.setInvertSelection(true); //we need to remove unselected attributes - invert selection
remove.setInputFormat(dataCp);
dataCp = Filter.useFilter(dataCp, remove); //select only attributes that are in the model
dataCp.setClassIndex(dataCp.numAttributes()-1); //set class attribute
String tmp[]=attr.split(",");
int[] integers = new int[tmp.length];
for (int i = 0; i < integers.length; i++)
integers[i] = Integer.parseInt(tmp[i]);
ArrayList<Double>[] dotsACp = new ArrayList[dataCp.numAttributes()-1];
ArrayList<Double>[] dotsBCp = new ArrayList[dataCp.numAttributes()-1];
int x=0;
for(int i=0;i<dotsA.length;i++){
int m=i+1;
if(ArrayUtils.contains(integers,m)){
dotsACp[x]=dotsA[i];
dotsBCp[x]=dotsB[i];
x++;
}
}
Visualize.modelVisualToFileAttrImptLine(outputDir +fName+format, modelName, datasetName, dataCp, dotsACp, dotsBCp,isClassification,RESOLUTION,classToExplain,"A4", visExplFC);
}
else if(data.numAttributes()-1 <= 6) //for format A4, for pdf and png format
Visualize.modelVisualToFileAttrImptLine(outputDir +fName+format, modelName, datasetName, data, dotsA, dotsB,isClassification,RESOLUTION,classToExplain,"A4", visExplFC);
else
Visualize.modelVisualToFileAttrImptLine(outputDir +fName+format, modelName, datasetName, data, dotsA, dotsB,isClassification,RESOLUTION,classToExplain,"AA", visExplFC); //"AA" is just any value different from "A4"
Visualize.attrImportanceVisualizationSorted(outputDir +fName+"-attrImp"+format, modelName, datasetName, data, drawLimit, dotsB,isClassification,RESOLUTION,"AA",visExplFC);
/*Instance visualisation*/
//pdf, png -> model
if(pdfPng){
if(visExplFC){
covertToPdfAndPng(fName,format, "visualisation/afterFC/eps/","visualisation/afterFC/pdf/","visualisation/afterFC/png/");
covertToPdfAndPng(fName+"-attrImp",format, "visualisation/afterFC/eps/","visualisation/afterFC/pdf/","visualisation/afterFC/png/"); //plot attribute importance
}
else{
covertToPdfAndPng(fName,format, "visualisation/beforeFC/eps/","visualisation/beforeFC/pdf/","visualisation/beforeFC/png/");
covertToPdfAndPng(fName+"-attrImp",format, "visualisation/beforeFC/eps/","visualisation/beforeFC/pdf/","visualisation/beforeFC/png/"); //plot attribute importance
}
}
for(int i = fromInst; i <= toInst; i++){
outputDir = visExplFC ? "visualisation/afterFC/eps/" : "visualisation/beforeFC/eps/";
double[] instanceExplanation = IME.explainInstanceNew(predictionModel, data, new Instances(data,(i-1),1), N_SAMPLES, isClassification, classToExplain);
double pred = -1;
if (isClassification)
pred = predictionModel.distributionForInstance((new Instances(data,(i-1),1)).instance(0))[classToExplain];
else
pred = predictionModel.classifyInstance((new Instances(data,(i-1),1)).instance(0));
format=".eps";
if(isClassification)
fName=modelName + "_" + datasetName + "_instance_" + (i)+ "-class_"+classValueName;
else
fName=modelName + "_" + datasetName + "_instance_" + (i)+ "-regr";
Visualize.instanceVisualizationToFile(outputDir +fName+format, modelName, datasetName, new Instances(data,(i-1),1), i, topHigh, instanceExplanation, pred, classToExplain, isClassification, visExplFC);
//pdf, png -> instance(s)
if(pdfPng)
if(visExplFC)
covertToPdfAndPng(fName, format,"visualisation/afterFC/eps/","visualisation/afterFC/pdf/","visualisation/afterFC/png/");
else
covertToPdfAndPng(fName, format,"visualisation/beforeFC/eps/","visualisation/beforeFC/pdf/","visualisation/beforeFC/png/");
}
}
public static void covertToPdfAndPng(String fName, String inFormat,String inDirEps, String outDirPdf, String outDirPng) throws Exception {
FileOutputStream fos;
PSDocument document;
File f;
PDFConverter converter;
PDFDocument documentNew;
SimpleRenderer renderer;
List<Image> images;
String format;
//load PostScript document
document = new PSDocument();
f=new File(inDirEps+fName+inFormat);
document.load(f);
//create OutputStream
outDirPdf=(justExplain && visualisation) ? "visualisation/afterFC/pdf/" : "visualisation/beforeFC/pdf/";
format=".pdf";
fos = new FileOutputStream(new File(outDirPdf+fName+format));
//create converter
converter = new PDFConverter();
converter.setPaperSize(PaperSize.A4);
//convert eps to pdf
converter.convert(document, fos);
//convert pdf to png
documentNew = new PDFDocument();
documentNew.load(new File(outDirPdf+fName+format));
renderer = new SimpleRenderer();
renderer.setResolution(300);// set resolution (in DPI)
//render
outDirPng=(justExplain && visualisation) ? "visualisation/afterFC/png/" : "visualisation/beforeFC/png/"; //does not overwrite an existing image ... delete the image with the same name before running
format=".png";
images = renderer.render(documentNew);
for (int j = 0; j < images.size(); j++) {
ImageIO.write((RenderedImage) images.get(j), "png", new File(outDirPng+fName+format)); //save images to png format
}
}
//combinations of order N
public static List allCombOfOrderN(List<String> allComb, int N){
Set<String> hSet = new HashSet<>();
List newTmpComb;
String tmpArrCG[];
for(int i=0;i<allComb.size();i++){
tmpArrCG=allComb.get(i).split(",");
newTmpComb=Arrays.asList(Generator.combination(tmpArrCG).simple(N).stream().toArray());
hSet.addAll(newTmpComb);
}
ArrayList<String> finalComb = new ArrayList<>(hSet);
return finalComb;
}
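//e.g., for allComb = {"1,3,5"} and N=2 the result holds the pairs [1, 3], [1, 5] and [3, 5];
//the HashSet removes pairs that repeat across the input groups.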
public static double sumArr(double tab []){
return Arrays.stream(tab).sum();
}
public static float sumArr(float tab []){
float sum=0;
for(int i=0;i<tab.length;i++)
sum+=tab[i];
return sum;
}
public static double mean(double[] d){
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += d[i];
return sum / d.length;
}
public static double mean(int[] d){
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += d[i];
return sum / d.length;
}
public static double mean(long[] d){
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += d[i];
return sum / d.length;
}
public static double meanArrList(ArrayList<Double[]> arrList){
double sum = 0;
int count =0;
for (int i = 0; i < arrList.size(); i++){
for(int j = 0; j < arrList.get(i).length; j++){
sum += arrList.get(i)[j];
count++;
}
}
return sum / count;
}
public static double var(double[] d, double m){ //variance
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += (d[i] - m) * (d[i] - m);
return sum / d.length;
}
public static double var(int[] d, double m){ //variance
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += (d[i] - m) * (d[i] - m);
return sum / d.length;
}
public static double var(long[] d, double m){ //variance
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += (d[i] - m) * (d[i] - m);
return sum / d.length;
}
public static double varArrList(ArrayList<Double[]> arrList, double m){ //variance
double sum = 0;
int count =0;
for (int i = 0; i < arrList.size(); i++){
for(int j=0; j < arrList.get(i).length;j++){
sum += (arrList.get(i)[j]-m) *(arrList.get(i)[j]-m);
count++;
}
}
return sum / (count);
}
public static double varArrList2(ArrayList<Double> arrList, double m){ //variance
double sum = 0;
int count =0;
for (int i = 0; i < arrList.size(); i++){
sum += (arrList.get(i)-m) *(arrList.get(i)-m);
count++;
}
return sum / (count);
}
public static double var(Vector d){
double m1= 0;
for (int i = 0; i < d.size(); i++)
m1 += (Double)d.elementAt(i);
m1 /= d.size();
double sum = 0;
for (int i = 0; i < d.size(); i++)
sum += ((Double)d.elementAt(i) - m1) * ((Double)d.elementAt(i)- m1);
return sum / d.size();
}
public static String rnd3(double d) {
DecimalFormat twoDForm = new DecimalFormat("0.000");
return twoDForm.format(d).replace(",",".");
}
public static double round(double value, int precision){
int scale=(int) Math.pow(10, precision);
return (double) Math.round(value * scale) / scale;
}
public static void print1d(double tab []){
for(int i=0;i<tab.length;i++){
System.out.print(rnd3(tab[i])+"\t");
logFile.print(rnd3(tab[i])+"\t");
}
}
public static void print2d(double arr [][]) {
for(int i=0; i<arr.length; i++){
for(int j=0; j<arr[i].length;j++){
System.out.printf("%9.3f",arr[i][j]);
}
System.out.println();
}
}
public static void print2d(float arr [][]) {
for(int i=0; i<arr.length; i++){
for(int j=0; j<arr[i].length;j++){
System.out.printf("%9.3f",arr[i][j]);
}
System.out.println();
}
}
public static void printList(List ar1){
for(int i=0;i<ar1.size();i++)
System.out.println(ar1.get(i)+" ");
}
public static void printFqAttrOneRow(List<String> ar1, Instances data){
String [] tmp;
for(String el: ar1){
tmp=el.split(",");
for(int i=0;i<tmp.length;i++)
if(justExplain)
impGroupsKD.print(data.attribute(Integer.parseInt(tmp[i])).name()+" ");
else
impGroups.print(data.attribute(Integer.parseInt(tmp[i])).name()+" ");
if(justExplain)
impGroupsKD.print("~#~ ");
else
impGroups.print("~#~ ");
}
}
public static int[] printMaxConstructLength(List<String> ar1){
String [] tmp;
int max=0;
int sum=0;
int sumMax[]=new int[2];
for(String el: ar1){
tmp=el.split(",");
sum+=tmp.length;
if(tmp.length>max)
max=tmp.length;
}
sumMax[0]=sum;
sumMax[1]=max;
return sumMax;
}
//for Jakulin's method - to get attribute names; just for info
public static void printAttrNamesIntInf(Instances data, List list){
String tmpArr[];
for(int i=0;i<list.size();i++){
tmpArr=list.get(i).toString().replace("[","").replace("]", "").trim().split(",");
System.out.println(data.attribute(Integer.parseInt(tmpArr[0].trim())).name()+" - "+data.attribute(Integer.parseInt(tmpArr[1].trim())).name());
impGroups.println(data.attribute(Integer.parseInt(tmpArr[0].trim())).name()+" - "+data.attribute(Integer.parseInt(tmpArr[1].trim())).name());
}
}
public static void printMap1(Map<String, Integer> frequencyMap){
Iterator<String> tmpIterator1 = frequencyMap.keySet().iterator();
while (tmpIterator1.hasNext()){
String str = tmpIterator1.next();
System.out.println(str + ": " + frequencyMap.get(str));
logFile.println(str + ": " + frequencyMap.get(str));
}
}
public static void printMap2(Map<String, Integer> frequencyMap, Instances data){
Iterator<String> tmpIterator1 = frequencyMap.keySet().iterator();
String [] tmp1; String str;
while(tmpIterator1.hasNext()){
str = tmpIterator1.next();
tmp1=str.split(",");
if(justExplain)
impGroupsKD.print("\t");
else
impGroups.print("\t");
for(int i=0;i<tmp1.length;i++){
if(i==tmp1.length-1){
if(justExplain)
impGroupsKD.print(data.attribute(Integer.parseInt(tmp1[i])).name());
else
impGroups.print(data.attribute(Integer.parseInt(tmp1[i])).name());
}
else{
if(justExplain)
impGroupsKD.print(data.attribute(Integer.parseInt(tmp1[i])).name()+" | ");
else
impGroups.print(data.attribute(Integer.parseInt(tmp1[i])).name()+" | ");
}
}
if(justExplain)
impGroupsKD.println(": " + frequencyMap.get(str));
else
impGroups.println(": " + frequencyMap.get(str));
}
}
public static void deleteXGBdll(){
File tempF=new File(tmpDir);
for(File fileTmp : tempF.listFiles()){
if(fileTmp.getName().contains("xgboost4j")){
fileTmp.delete();
}
}
}
public static void deleteTempRFiles(){
File tempF=new File(tmpDir);
for(File fileTmp : tempF.listFiles()){
if(fileTmp.getName().contains("RControl") || fileTmp.getName().contains("ROutput") || fileTmp.getName().contains("getTmpDir")){
fileTmp.delete();
}
}
}
}
| 378,389 | 66.751119 | 2,328 | java |
featConstr | featConstr-master/src/featconstr/IME.java | package featconstr;
import java.util.Arrays;
import java.util.Random;
import java.util.Vector;
import jsc.combinatorics.Permutation;
import jsc.combinatorics.Permutations;
import weka.classifiers.Classifier;
import weka.core.Instances;
/**
*
* @author bostjan
*/
@SuppressWarnings({"rawtypes", "unchecked"})
public class IME{
public static int CLASS_VALUE_TO_EXPLAIN =1; //0 = first class value
public static int CLASS_IDX = -1; //default = -1 (last attribute is class attribute)
public static Random rand = new Random();
//equal sampling
public static double[][] explainAllDatasetES(Instances train, Instances test, Classifier K, int numSamples, int classToExplain)throws Exception{
double[][] instanceExplanation= new double[test.numInstances()][test.numAttributes()-1];
int classIdx = CLASS_IDX;
if (classIdx < 0 || classIdx >= test.numAttributes())
test.setClassIndex(test.numAttributes()-1);
else
test.setClassIndex(classIdx);
boolean isClassification = true;
if (test.classAttribute().isNumeric())
isClassification = false;
for (int i = 0; i < test.numInstances(); i++)
instanceExplanation[i] = explainInstanceNew(K, train, new Instances(test,i,1), numSamples, isClassification, classToExplain); //Štrumbelj's modifications
//instanceExplanation[i] = explainInstance(K, train, new Instances(test,i,1), numSamples, isClassification, classToExplain);
return instanceExplanation;
}
//adaptive sampling - the stopping criterion is the total number of samples
public static double[][] explainAllDatasetAS(Instances train, Instances test, Classifier K, int mMin, int maxS, int classToExplain)throws Exception{
double[][] instanceExplanation= new double[test.numInstances()][test.numAttributes()-1];
int classIdx = CLASS_IDX;
if (classIdx < 0 || classIdx >= test.numAttributes())
test.setClassIndex(test.numAttributes()-1);
else
test.setClassIndex(classIdx);
boolean isClassification = true;
if (test.classAttribute().isNumeric())
isClassification = false;
for (int i = 0; i < test.numInstances(); i++)
instanceExplanation[i]= explainInstAdapSmp(K, train, new Instances(test,i,1), isClassification, classToExplain, mMin, maxS);
return instanceExplanation;
}
//adaptive sampling - the stopping criterion is the approximation error for all attributes
public static double[][] explainAllDatasetAS(Instances train, Instances test, Classifier K, int mMin, int classToExplain, double e, int pctErr)throws Exception{
double[][] instanceExplanation= new double[test.numInstances()][test.numAttributes()-1];
int classIdx = CLASS_IDX;
if (classIdx < 0 || classIdx >= test.numAttributes())
test.setClassIndex(test.numAttributes()-1);
else
test.setClassIndex(classIdx);
boolean isClassification = true;
if (test.classAttribute().isNumeric())
isClassification = false;
for (int i = 0; i < test.numInstances(); i++)
instanceExplanation[i]= explainInstAdapSmp(K, train, new Instances(test,i,1), isClassification, classToExplain, mMin, e, pctErr);
return instanceExplanation;
}
//sampling based on the approximation error
public static double[][] explainAllDatasetAES(Classifier K, Instances train, Instances test, boolean isClassification, int classToExplain, int mMin, double e, int pctErr)throws Exception{
double[][] instanceExplanation= new double[test.numInstances()][test.numAttributes()-1];
for (int i = 0; i < test.numInstances(); i++)
instanceExplanation[i] = explainInstAproxErr(K, train, new Instances(test,i,1),isClassification, classToExplain, mMin, e, pctErr);
return instanceExplanation;
}
//credits to Erik Štrumbelj
//*Štrumbelj, E., & Kononenko, I. (2011, April). A general method for visualizing and explaining black-box regression models. In International Conference on Adaptive and Natural Computing Algorithms (pp. 21-30). Springer, Berlin, Heidelberg.
public static double[] explainInstance(Classifier K, Instances D, Instances I1, int m, boolean isClassification, int classToExplain) throws Exception{
//algorithm for calculating attribute contributions in a particular case (Algorithm 1* from the article, adapted to work on all combinations of continuous/nominal attributes and classification/regression problems)
//INPUT: prediction model, dataset, instance to explain, number of samples m, classification problem (true or false), class to explain
//OUTPUT: vector of contributions of individual attributes for a given case
int numOfFeats = D.numAttributes() - 1;
double[] result = new double[numOfFeats];
Permutations permuts = new Permutations(numOfFeats);
for (int i = 0; i < m; i++){ //repeat m times
Permutation tempPermutation = permuts.randomPermutation();
int[] intPermutation = tempPermutation.toIntArray(); //we choose a random permutation
for (int feature = 0; feature < numOfFeats; feature++){ //for each attribute
// *** ASSEMBLE 2 EXAMPLES ****
//here, the value of the attribute is randomly selected by randomly selecting an instance from the dataset and overwriting the value for a given attribute (separately for each attribute)
//in the first case, we randomly select the values of the attributes that are to the right of the current attribute (including the current attribute); the remaining values are taken from the instance we are explaining
Instances instance = new Instances(I1,0, 1);
int featureIndex = intPermutation[feature]-1; //because we have permutations e.g., from 1 to 6, we have to subtract 1 because we have indexes from 0 to 5
for (int j = D.numAttributes() - 2; j >= feature; j--){ //D.numAttributes() returns num of attributes + class, that's why -2
int rndInst=rand.nextInt(D.numInstances());
double value = D.instance(rndInst).value(intPermutation[j]-1); //the value of a given attribute in a randomly selected case (instance)
instance.instance(0).setValue(intPermutation[j]-1, value);
}
double predictionLo = 0;
if (isClassification)
predictionLo = K.distributionForInstance(instance.instance(0))[classToExplain];
else
predictionLo = K.classifyInstance(instance.instance(0));
//in the second case, we randomly select the values of the attributes that are strictly to the right of the current attribute; the remaining values are taken from the instance we are explaining
instance = new Instances(I1,0, 1);
for (int j = D.numAttributes() - 2; j > feature; j--){
int rndInst=rand.nextInt(D.numInstances());
double value = D.instance(rndInst).value(intPermutation[j]-1);
instance.instance(0).setValue(intPermutation[j]-1, value);
}
double predictionHi = 0;
if (isClassification)
predictionHi = K.distributionForInstance(instance.instance(0))[classToExplain];
else
predictionHi = K.classifyInstance(instance.instance(0));
result[featureIndex] += predictionHi-predictionLo;
}
}
for (int featureIndex = 0; featureIndex < numOfFeats; featureIndex++)
result[featureIndex] /= m; //in the end, we divide by the number of samples
return result;
}
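/*
A note on what the loop above estimates (see the cited Štrumbelj & Kononenko papers): the contribution of
feature i is its Shapley value, approximated by Monte Carlo sampling as
phi_i ~ (1/m) * sum_{k=1..m} [ f(x with i and its predecessors in permutation pi_k kept) - f(x with only the predecessors kept) ]
where the non-kept feature values are replaced by values drawn at random from the dataset D.
*/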
//credits to Erik Štrumbelj
public static double[] explainInstanceNew(Classifier K, Instances D, Instances I1, int m, boolean isClassification, int classValueToExplain) throws Exception{
int numOfFeats = D.numAttributes() - 1;
Random internalRand = new Random();
double[] result = new double[numOfFeats];
Permutations permuts = new Permutations(numOfFeats);
for (int i = 0; i < m; i++){ // equal sampling
Permutation tempPermutation = permuts.randomPermutation();
int[] intPermutation = tempPermutation.toIntArray();
for (int feature = 0; feature < numOfFeats; feature++){
Instances instance = new Instances(I1,0, 1);
int featureIndex = intPermutation[feature]-1;
for (int j = D.numAttributes() - 2; j > feature; j--){
double value = D.instance(internalRand.nextInt(D.numInstances())).value(intPermutation[j]-1); //the value of a given attribute in a randomly selected case (instance)
instance.instance(0).setValue(intPermutation[j]-1, value);
}
double predictionHi = 0;
if (isClassification)
predictionHi = K.distributionForInstance(instance.instance(0))[classValueToExplain];
else
predictionHi = K.classifyInstance(instance.instance(0));
double value = D.instance(internalRand.nextInt(D.numInstances())).value(intPermutation[feature]-1); //the value of a given attribute in a randomly selected case (instance)
instance.instance(0).setValue(intPermutation[feature]-1, value);
double predictionLo = 0;
if (isClassification)
predictionLo = K.distributionForInstance(instance.instance(0))[classValueToExplain];
else
predictionLo = K.classifyInstance(instance.instance(0));
result[featureIndex] += predictionHi-predictionLo;
}
}
for (int featureIndex = 0; featureIndex < numOfFeats; featureIndex++)
result[featureIndex] /= m;
return result;
}
//estimates the contribution of a single feature using one sample
public static double explainInstanceOneS(Classifier K, Instances D, Instances I1, int feat, boolean isClassification, int classValueToExplain) throws Exception{
int numOfFeats = D.numAttributes() - 1;
Random internalRand = new Random();
Permutations permuts = new Permutations(numOfFeats);
Permutation tempPermutation = permuts.randomPermutation();
int[] intPermutation = tempPermutation.toIntArray(); //use random permutation
Instances instance = new Instances(I1,0, 1);
int feature=0;
for(int i=0;i<intPermutation.length;i++)
if(feat+1==intPermutation[i])
feature=i;
for (int j = D.numAttributes() - 2; j > feature; j--){
double value = D.instance(internalRand.nextInt(D.numInstances())).value(intPermutation[j]-1); //the value of a given attribute in a randomly selected case (instance)
instance.instance(0).setValue(intPermutation[j]-1, value);
}
double predictionHi = 0;
if(isClassification)
predictionHi = K.distributionForInstance(instance.instance(0))[classValueToExplain];
else
predictionHi = K.classifyInstance(instance.instance(0));
double value = D.instance(internalRand.nextInt(D.numInstances())).value(intPermutation[feature]-1); //the value of a given attribute in a randomly selected case (instance)
instance.instance(0).setValue(intPermutation[feature]-1, value);
double predictionLo = 0;
if(isClassification)
predictionLo = K.distributionForInstance(instance.instance(0))[classValueToExplain];
else
predictionLo = K.classifyInstance(instance.instance(0));
        double finalC = predictionHi - predictionLo;
return finalC;
}
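    //illustrative sketch added for exposition (not part of the original code; the method name is an assumption):
    //averaging m independent one-sample estimates from explainInstanceOneS gives a Monte Carlo
    //approximation of a single feature's contribution - the adaptive-sampling methods below do the
    //same with a feature-dependent number of samples
    public static double explainFeatureBySampling(Classifier K, Instances D, Instances I1, int feat, boolean isClassification, int classValueToExplain, int m) throws Exception{
        double sum = 0;
        for (int i = 0; i < m; i++)
            sum += explainInstanceOneS(K, D, I1, feat, isClassification, classValueToExplain);
        return sum / m; //mean of the per-sample contributions
    }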
//credits to Erik Štrumbelj
//*Štrumbelj, E., & Kononenko, I. (2011, April). A general method for visualizing and explaining black-box regression models. In International Conference on Adaptive and Natural Computing Algorithms (pp. 21-30). Springer, Berlin, Heidelberg.
public static double[] explainValue(Classifier K, Instances D, int attrIdx, double valIdx, int m, boolean isNominal, double[][] extremeValues, boolean isClassification){
        //algorithm for calculating the general contribution of a value of an attribute (Algorithm 2 from the paper*, adapted to work on all combinations of continuous / nominal attributes and classification / regression)
//INPUT: prediction model, dataset, index of an attribute, index of a value / value [depending on classification or regression], number of samples m, nominal attribute (true or false), min and max attribute values, classification problem (true or false)
//OUTPUT: contribution of a value (psi) and stdev contribution
double[] res = new double[2];
        double[] psi = new double[m]; //one contribution per sample (e.g., m = 1000 by default)
try{
for (int i = 0; i < m; i++){ //repeat m times, once for each sample
//default
Instances instance1 = new Instances(D,0,1);
Instances instance2 = new Instances(D,0,1);
for (int j = 0; j < D.numAttributes()-1; j++){
//randomly set attribute values (same in both cases)
                //we set the value of the attribute (which we are explaining) only in the second instance to the value we are interested in
if (D.attribute(j).isNominal()){
String value = D.attribute(j).value(rand.nextInt(D.attribute(j).numValues()));
instance1.instance(0).setValue(j, value);
instance2.instance(0).setValue(j, value);
if (j == attrIdx)
instance2.instance(0).setValue(j, D.attribute(j).value((int)valIdx));
}
else{
//we need min / max to know from which interval to choose random values
//extremeValues - 2D table in which are min and max attribute values ([j][0]-min, [j][1]-max)
double value = rand.nextFloat() * (extremeValues[j][1] - extremeValues[j][0]) + extremeValues[j][0]; //rand * (max-min) + min
instance1.instance(0).setValue(j, value);
instance2.instance(0).setValue(j, value);
//set the value of the attribute to the value we are interested in
if (j == attrIdx)
instance2.instance(0).setValue(j, valIdx);
}
}
double p2 = -1;
double p1 = -1;
if (isClassification){ //at classification we look at the probability of a prediction for the desired class, at regression only the prediction
p2 = K.distributionForInstance(instance2.instance(0))[CLASS_VALUE_TO_EXPLAIN];
p1 = K.distributionForInstance(instance1.instance(0))[CLASS_VALUE_TO_EXPLAIN];
}
else{
p2 = K.classifyInstance(instance2.instance(0));
p1 = K.classifyInstance(instance1.instance(0));
}
psi[i] = p2 - p1; //the difference between the prediction with the value and the "no" value
}
res[0] = mean(psi); //contribution of the value (psi), mean is a method that calculates the average in a 1D table
res[1] = Math.sqrt(var(psi,mean(psi))); //stdev of contribution
}
catch(Exception e){
e.printStackTrace();
}
return res;
}
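    //illustrative sketch added for exposition (not part of the original code; the method name is an assumption):
    //one way to build the extremeValues table expected by explainValue and explainValueAttrImp,
    //i.e., extremeValues[j][0] = min and extremeValues[j][1] = max of attribute j in D
    //(entries computed for nominal attributes are never read by explainValue)
    public static double[][] exampleExtremeValues(Instances D){
        double[][] ext = new double[D.numAttributes()][2];
        for (int j = 0; j < D.numAttributes(); j++){
            ext[j][0] = Double.POSITIVE_INFINITY;
            ext[j][1] = Double.NEGATIVE_INFINITY;
            for (int i = 0; i < D.numInstances(); i++){
                double val = D.instance(i).value(j);
                if (val < ext[j][0]) ext[j][0] = val;
                if (val > ext[j][1]) ext[j][1] = val;
            }
        }
        return ext;
    }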
//similar as explainValue method, returns only psi; explainValue method returns mean(psi) and stdev(psi)
public static double[] explainValueAttrImp(Classifier K, Instances D, int attrIdx, double valIdx, int m, boolean isNominal, double[][] extremeValues, boolean isClassification){
double[] psi = new double[m];
try{
for (int i = 0; i < m; i++){
Instances instance1 = new Instances(D,0,1);
Instances instance2 = new Instances(D,0,1);
for (int j = 0; j < D.numAttributes()-1; j++){
if (D.attribute(j).isNominal()){
String value = D.attribute(j).value(rand.nextInt(D.attribute(j).numValues()));
instance1.instance(0).setValue(j, value);
instance2.instance(0).setValue(j, value);
if (j == attrIdx)
instance2.instance(0).setValue(j, D.attribute(j).value((int)valIdx));
}
else{
double value = rand.nextFloat() * (extremeValues[j][1] - extremeValues[j][0]) + extremeValues[j][0];
instance1.instance(0).setValue(j, value);
instance2.instance(0).setValue(j, value);
if (j == attrIdx)
instance2.instance(0).setValue(j, valIdx);
}
}
double p2 = -1;
double p1 = -1;
if (isClassification){
p2 = K.distributionForInstance(instance2.instance(0))[CLASS_VALUE_TO_EXPLAIN];
p1 = K.distributionForInstance(instance1.instance(0))[CLASS_VALUE_TO_EXPLAIN];
}
else{
p2 = K.classifyInstance(instance2.instance(0));
p1 = K.classifyInstance(instance1.instance(0));
}
psi[i] = p2 - p1;
}
}
catch(Exception e){
e.printStackTrace();
}
return psi;
}
//adaptive sampling; sum of samples (constraint) based on variance; Algorithm 2 from the paper*
//*Štrumbelj, E., & Kononenko, I. (2014). Explaining prediction models and individual predictions with feature contributions. Knowledge and information systems, 41(3), 647-665.
public static double[] explainInstAdapSmp(Classifier K, Instances D, Instances I, boolean isClassification, int classValueToExplain, int mMin, int mMax) throws Exception{
int numOfAttr=D.numAttributes()-1;
        int m[]=new int[numOfAttr]; //set all values to 0 ... initially, the number of samples for each attribute is 0
        double psi[]=new double[numOfAttr]; //set all values to 0 ... initially, all contributions are 0
        double tmpVariance[]=new double[numOfAttr]; //set all values to 0 ... initially, all running variances are 0
        double tmpMean[]=new double[numOfAttr]; //set all values to 0 ... initially, all running means are 0
Vector v[]= new Vector[numOfAttr];
double diff[]=new double[numOfAttr]; //for minimizing squared error
for(int i=0;i<D.numAttributes()-1;i++)
v[i]=new Vector();
for(int i = 0 ; i < psi.length; i++ )
psi[i] = 0;
for(int i=0;i<numOfAttr;i++)
m[i]=mMin;
double tmpPsi;
//get initial variances for each feature
for(int i=0;i<numOfAttr;i++){
for(int j=0;j<mMin;j++){
tmpPsi=explainInstanceOneS(K, D, I, i, isClassification,classValueToExplain);
v[i].add(tmpPsi);
tmpMean[i]=mean(v[i]);
psi[i] += tmpPsi;
tmpVariance[i]=incrementalVar(v[i].size(), tmpVariance[i], tmpMean[i], tmpPsi)[3];
}
}
//for minimizing squared error
for(int i=0;i<numOfAttr;i++)
diff[i]=Math.sqrt(tmpVariance[i]/m[i]) - Math.sqrt(tmpVariance[i]/(m[i]+1));
int j;
while(sumArr(m)<mMax){
j=idxOfMaxValue(diff); //index of max value in array diff
tmpPsi=explainInstanceOneS(K, D, I, j, isClassification,classValueToExplain);
v[j].add(tmpPsi);
tmpMean[j]=mean(v[j]);
psi[j] += tmpPsi;
tmpVariance[j]=incrementalVar(v[j].size(), tmpVariance[j], tmpMean[j], tmpPsi)[3];
m[j]++;
            diff[j]=Math.sqrt(tmpVariance[j]/m[j])-Math.sqrt(tmpVariance[j]/(m[j]+1)); //update diff for the feature that just received an additional sample
}
for(int i=0;i<psi.length;i++)
psi[i]=psi[i]/m[i];
return psi;
}
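    //illustrative sketch added for exposition (not part of the original code; the method name is an assumption):
    //the greedy criterion used above - the expected reduction of the standard error when a feature
    //that currently has m samples with estimated variance "variance" receives one additional sample;
    //the feature with the largest reduction is sampled next
    public static double exampleStdErrReduction(double variance, int m){
        return Math.sqrt(variance / m) - Math.sqrt(variance / (m + 1));
    }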
    //adaptive sampling; sampling based on the approximation error, sampling while(Math.sqrt(zSquared*tmpVariance[i]/m[i]) > e); Algorithm 2 from the paper*
//*Štrumbelj, E., & Kononenko, I. (2014). Explaining prediction models and individual predictions with feature contributions. Knowledge and information systems, 41(3), 647-665.
public static double[] explainInstAdapSmp(Classifier K, Instances D, Instances I, boolean isClassification, int classValueToExplain, int mMin, double e, int pctErr) throws Exception{
int numOfAttr=D.numAttributes()-1;
        int m[]=new int[numOfAttr]; //set all values to 0 ... initially, the number of samples for each attribute is 0
        double psi[]=new double[numOfAttr]; //set all values to 0 ... initially, all contributions are 0
        double tmpVariance[]=new double[numOfAttr]; //set all values to 0 ... initially, all running variances are 0
        double tmpMean[]=new double[numOfAttr]; //set all values to 0 ... initially, all running means are 0
Vector v[]= new Vector[numOfAttr];
double diff[]=new double[numOfAttr]; //for minimizing squared error
for(int i=0;i<D.numAttributes()-1;i++)
v[i]=new Vector();
for(int i = 0 ; i < psi.length; i++ )
psi[i] = 0;
for(int i=0;i<numOfAttr;i++)
m[i]=mMin;
double tmpPsi;
//get initial variances for each feature
for(int i=0;i<numOfAttr;i++){
for(int j=0;j<mMin;j++){
tmpPsi=explainInstanceOneS(K, D, I, i, isClassification,classValueToExplain);
v[i].add(tmpPsi);
tmpMean[i]=mean(v[i]);
psi[i] += tmpPsi;
tmpVariance[i]=incrementalVar(v[i].size(), tmpVariance[i], tmpMean[i], tmpPsi)[3];
}
}
double z;
switch(pctErr){
case 90:z=1.285;break; //for 90% of probability
case 95:z=1.645;break; //for 95% of probability
case 99:z=2.325;break; //for 99% of probability
default: z=1.285; //we set default Z for 90%
}
double zSquared=Math.pow(z, 2);
for(int i=0;i<numOfAttr;i++){
while(Math.sqrt(zSquared*tmpVariance[i]/m[i]) > e){
tmpPsi=explainInstanceOneS(K, D, I, i, isClassification,classValueToExplain);
v[i].add(tmpPsi);
tmpMean[i]=mean(v[i]);
psi[i] += tmpPsi;
tmpVariance[i]=incrementalVar(v[i].size(), tmpVariance[i], tmpMean[i], tmpPsi)[3];
m[i]++;
}
}
for(int i=0;i<psi.length;i++)
psi[i]=psi[i]/m[i];
return psi;
}
    //sampling based on the approximation error; calculate the number of samples for each attribute from the variance estimated on mMin initial samples
//see page 10 in Štrumbelj, E., & Kononenko, I. (2010). An efficient explanation of individual classifications using game theory. The Journal of Machine Learning Research, 11, 1-18.
public static double[] explainInstAproxErr(Classifier K, Instances D, Instances I, boolean isClassification, int classValueToExplain, int mMin, double e, int pctErr) throws Exception{
int numOfAttr=D.numAttributes()-1;
        int m[]=new int[numOfAttr]; //set all values to 0 ... initially, the number of samples for each attribute is 0
        double psi[]=new double[numOfAttr]; //set all values to 0 ... initially, all contributions are 0
        double tmpVariance[]=new double[numOfAttr]; //set all values to 0 ... initially, all running variances are 0
        double tmpMean[]=new double[numOfAttr]; //set all values to 0 ... initially, all running means are 0
Vector v[]= new Vector[numOfAttr];
for(int i=0;i<D.numAttributes()-1;i++)
v[i]=new Vector();
for(int i = 0 ; i < psi.length; i++ )
psi[i] = 0;
double tmpPsi;
//get initial variances for each feature by using mMin samples
for(int i=0;i<numOfAttr;i++){
for(int j=0;j<mMin;j++){
tmpPsi=explainInstanceOneS(K, D, I, i, isClassification,classValueToExplain);
v[i].add(tmpPsi);
tmpMean[i]=mean(v[i]);
psi[i] += tmpPsi;
tmpVariance[i]=incrementalVar(v[i].size(), tmpVariance[i], tmpMean[i], tmpPsi)[3];
}
m[i]=mMin;
}
double z;
switch(pctErr){
case 90:z=1.285;break; //for 90% of probability
case 95:z=1.645;break; //for 95% of probability
case 99:z=2.325;break; //for 99% of probability
default: z=1.285; //we set default Z for 90%
}
double zSquared=Math.pow(z, 2);
double eSquared=Math.pow(e, 2);
//calculate number of samples for each attribute
int tmpM;
for(int i=0;i<numOfAttr;i++){
tmpM=(int)Math.round((zSquared*tmpVariance[i])/eSquared);
if(tmpM>mMin)
m[i]=tmpM;
}
for(int i=0;i<numOfAttr;i++)
if(m[i]>mMin)
for(int j=0;j<(m[i]-mMin);j++)
psi[i] +=explainInstanceOneS(K, D, I, i, isClassification,classValueToExplain);
for(int i=0;i<psi.length;i++)
psi[i]=psi[i]/m[i];
return psi;
}
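    //illustrative sketch added for exposition (not part of the original code; the method name is an assumption):
    //the sample-size rule used above, m = z^2 * var / e^2; e.g., z = 1.645 (95%), an estimated
    //variance of 0.04 and an error bound e = 0.01 give m = 1082
    public static int exampleRequiredSamples(double z, double variance, double e){
        return (int) Math.round((z * z * variance) / (e * e));
    }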
    //minimizing the number of samples by estimating the sample variance
//adaptive sampling - Algorithm 2 from the paper*, test of stopping condition (m[i]<Math.round((zSquared*tmpVariance[i])/eSquared) && m[i]<mMax)
//*Štrumbelj, E., & Kononenko, I. (2014). Explaining prediction models and individual predictions with feature contributions. Knowledge and information systems, 41(3), 647-665.
public static double[] explainInstBasedOnVar(Classifier K, Instances D, Instances I, boolean isClassification, int classValueToExplain, int mMin, int mMax, double e, int pctErr) throws Exception{
int numOfAttr=D.numAttributes()-1;
        int m[]=new int[numOfAttr]; //set all values to 0 ... initially, the number of samples for each attribute is 0
        double psi[]=new double[numOfAttr]; //set all values to 0 ... initially, all contributions are 0
        double tmpVariance[]=new double[numOfAttr]; //set all values to 0 ... initially, all running variances are 0
        double tmpMean[]=new double[numOfAttr]; //set all values to 0 ... initially, all running means are 0
Vector v[]= new Vector[numOfAttr];
for(int i=0;i<D.numAttributes()-1;i++)
v[i]=new Vector();
for(int i = 0 ; i < psi.length; i++ )
psi[i] = 0;
for(int i=0;i<numOfAttr;i++)
m[i]=mMin;
double tmpPsi;
//get initial variances for each feature
for(int i=0;i<numOfAttr;i++){
for(int j=0;j<mMin;j++){
tmpPsi=explainInstanceOneS(K, D, I, i, isClassification,classValueToExplain);
v[i].add(tmpPsi);
tmpMean[i]=mean(v[i]);
psi[i] += tmpPsi;
tmpVariance[i]=incrementalVar(v[i].size(), tmpVariance[i], tmpMean[i], tmpPsi)[3];
}
}
double z;
switch(pctErr){
case 90:z=1.285;break; //for 90% of probability
case 95:z=1.645;break; //for 95% of probability
case 99:z=2.325;break; //for 99% of probability
default: z=1.285; //we set default Z for 90%
}
double zSquared=Math.pow(z, 2);
double eSquared=Math.pow(e, 2);
for(int i=0;i<numOfAttr;i++){
while(m[i]<Math.round((zSquared*tmpVariance[i])/eSquared) && m[i]<mMax){
tmpPsi=explainInstanceOneS(K, D, I, i, isClassification,classValueToExplain);
v[i].add(tmpPsi);
tmpMean[i]=mean(v[i]);
psi[i] += tmpPsi;
tmpVariance[i]=incrementalVar(v[i].size(), tmpVariance[i], tmpMean[i], tmpPsi)[3];
m[i]++;
}
}
for(int i=0;i<psi.length;i++)
psi[i]=psi[i]/m[i];
return psi;
}
public static double[] incrementalVar(double k, double M2, double mean, double sample){
//k number of elements in data
//previous variance M2 as parameter in method
//sample - sample that we add
k++;
double d = sample - mean;
mean += d / k;
M2 += d * (sample - mean);
double var = M2 / (k - 1);
        //use k instead of (k-1) if you want to compute the exact variance of the given data; use (k-1) if the data are samples of a larger population
return new double[]{k,M2,mean,var};
}
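    //illustrative sketch added for exposition (not part of the original code; the method name is an assumption):
    //how incrementalVar is meant to be threaded through successive calls - each call returns
    //{k, M2, mean, var} and the first three values are fed back in together with the next sample
    //(var is NaN until at least two samples have been processed)
    public static double exampleIncrementalVar(double[] samples){
        double k = 0, M2 = 0, mean = 0, var = 0;
        for (double s : samples){
            double[] state = incrementalVar(k, M2, mean, s);
            k = state[0]; M2 = state[1]; mean = state[2]; var = state[3];
        }
        return var; //sample variance of all processed values
    }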
    public static int sumArr(int[] tab){ //sum of the array elements
return Arrays.stream(tab).sum();
}
public static double var(Vector d){ //variance in vector
double m1= 0;
for (int i = 0; i < d.size(); i++){
m1 += (Double)d.elementAt(i);
}
m1 /= d.size();
double sum = 0;
for (int i = 0; i < d.size(); i++)
sum += ((Double)d.elementAt(i) - m1) * ((Double)d.elementAt(i)- m1);
return sum / d.size();
}
public static double var(double[] d){
double m1= 0;
for (int i = 0; i < d.length; i++)
m1 += d[i];
m1 /= d.length;
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += (d[i] - m1) * (d[i]- m1);
return sum / d.length;
}
public static double var(double[] d, double m){
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += (d[i] - m) * (d[i] - m);
return sum / d.length;
}
public static double var(double[] d, double m, int max){
double sum = 0;
for (int i = 0; i < max; i++)
sum += (d[i] - m) * (d[i] - m);
return sum /max;
}
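    //note: the var(...) overloads above divide by n (population variance), whereas incrementalVar
    //divides by (k-1) (unbiased sample variance); see the comment inside incrementalVar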
public static double mean(Vector d){ //mean in Vector
double sum = 0;
for (int i = 0; i < d.size(); i++)
sum += (Double)d.elementAt(i);
return sum / d.size();
}
public static double mean(double[] d){
double sum = 0;
for (int i = 0; i < d.length; i++)
sum += d[i];
return sum / d.length;
}
public static int idxOfMaxValue(double[] array){
int maxValAt = 0;
for(int i = 1; i < array.length; i++)
maxValAt = array[i] > array[maxValAt] ? i : maxValAt;
return maxValAt;
}
}
| 31,088 | 46.976852 | 261 | java |
featConstr | featConstr-master/src/featconstr/IndexValue.java | package featconstr;
/**
*
* @author bostjan
*/
public class IndexValue{
int i;
double v;
public IndexValue( int i, double v ) {
this.i = i;
this.v = v;
}
}
| 187 | 10.75 | 41 | java |
featConstr | featConstr-master/src/featconstr/KononenkosMDL.java | package featconstr;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import weka.core.ContingencyTables;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Range;
import weka.core.SparseInstance;
import weka.core.SpecialFunctions;
import weka.core.Utils;
import weka.filters.Filter;
/**
*
* @author bostjan
*/
@SuppressWarnings("serial")
public class KononenkosMDL extends Filter{
Instances data;
public KononenkosMDL(){
}
public KononenkosMDL(Instances data){
this.data=data;
m_DiscretizeCols.setUpper(data.numAttributes() - 1);
}
//Output binary attributes for discretized attributes.
protected boolean m_MakeBinary = false;
//Use Kononenko's MDL criterion instead of Fayyad et al.'s
protected boolean m_UseKononenko = false;
//Stores which columns to Discretize
protected Range m_DiscretizeCols = new Range();
//Store the current cutpoints
protected double[][] m_CutPoints = null;
//Use better encoding of split point for MDL.
protected boolean m_UseBetterEncoding = false;
    /**
     * Input an instance for filtering. The instance is converted and made
     * available for output immediately if cut points have already been
     * computed; otherwise it is buffered.
     *
     * @param instance the instance to filter
     * @return true if the instance was converted and made available for output
     */
public boolean input(Instance instance){
if (getInputFormat() == null){
throw new IllegalStateException("No input instance format defined");
}
if (m_NewBatch){
resetQueue();
m_NewBatch = false;
}
if (m_CutPoints != null){
convertInstance(instance);
return true;
}
bufferInput(instance);
return false;
}
/**
* Convert a single instance over. The converted instance is added to the end
* of the output queue.
*
* @param instance the instance to convert
*/
protected void convertInstance(Instance instance){
int index = 0;
double[] vals = new double[outputFormatPeek().numAttributes()];
// Copy and convert the values
for (int i = 0; i < getInputFormat().numAttributes(); i++){
if (m_DiscretizeCols.isInRange(i) && getInputFormat().attribute(i).isNumeric()){
int j;
double currentVal = instance.value(i);
if (m_CutPoints[i] == null){
if (instance.isMissing(i)){
vals[index] = Utils.missingValue();
}
else{
vals[index] = 0;
}
index++;
}
else{
if (!m_MakeBinary){
if (instance.isMissing(i)){
vals[index] = Utils.missingValue();
}
else{
for (j = 0; j < m_CutPoints[i].length; j++){
if(currentVal <= m_CutPoints[i][j]){
break;
}
}
vals[index] = j;
}
index++;
} else {
for (j = 0; j < m_CutPoints[i].length; j++) {
if (instance.isMissing(i)) {
vals[index] = Utils.missingValue();
} else if (currentVal <= m_CutPoints[i][j]) {
vals[index] = 0;
} else {
vals[index] = 1;
}
index++;
}
}
}
}
else{
vals[index] = instance.value(i);
index++;
}
}
Instance inst = null;
if (instance instanceof SparseInstance){
inst = new SparseInstance(instance.weight(), vals);
}
else {
inst = new DenseInstance(instance.weight(), vals);
}
copyValues(inst, false, instance.dataset(), outputFormatPeek());
push(inst); //No need to copy instance
}
public void printCutPoints(){
        for(int i=0; i<m_CutPoints.length; i++){
            if(m_CutPoints[i]==null) //attribute was not discretized or no acceptable split was found
                continue;
            for(int j=0; j<m_CutPoints[i].length;j++){
System.out.printf("%8.3f",m_CutPoints[i][j]);
}
System.out.println();
}
}
    /**
     * Set cutpoints for a single attribute using MDL.
     *
     * @param index the index of the attribute to set cutpoints for
     * @param data the data to work with
     */
    protected void calculateCutPointsByMDL(int index, Instances data) {
//Sort instances
data.sort(data.attribute(index));
        if (m_CutPoints == null) //avoid wiping cut points already computed for other attributes when called repeatedly from calculateCutPoints
            m_CutPoints = new double[data.numAttributes()][];
//Find first instances that's missing
int firstMissing = data.numInstances();
for (int i = 0; i < data.numInstances(); i++) {
if (data.instance(i).isMissing(index)) {
firstMissing = i;
break;
}
}
System.out.println("firstMissing: "+firstMissing);
m_CutPoints[index] = cutPointsForSubset(data, index, 0, firstMissing);
System.out.println("idx: "+index+" value: "+m_CutPoints[index]);
}
// Generate the cutpoints for each attribute
protected void calculateCutPoints(){
Instances copy = null;
m_CutPoints = new double[data.numAttributes()][];
for (int i = data.numAttributes() - 1; i >= 0; i--){
if ((m_DiscretizeCols.isInRange(i)) && (data.attribute(i).isNumeric())){
if (copy == null) {
copy = new Instances(data);
}
calculateCutPointsByMDL(i, copy);
}
}
}
/**
* Gets the cut points for an attribute
*
* @param attributeIndex the index (from 0) of the attribute to get the cut
* points of
     * @return an array containing the cutpoints (or null if the attribute
     * requested isn't being discretized)
*/
public double[] getCutPoints(int attributeIndex) {
if (m_CutPoints == null) {
return null;
}
return m_CutPoints[attributeIndex];
}
/**
* Selects cutpoints for sorted subset.
*
* @param instances
* @param attIndex
* @param first
* @param lastPlusOne
* @return
*/
public double[] cutPointsForSubset(Instances instances, int attIndex, int first, int lastPlusOne){
double[][] counts, bestCounts;
double[] priorCounts, left, right, cutPoints;
double currentCutPoint = -Double.MAX_VALUE, bestCutPoint = -1, currentEntropy, bestEntropy, priorEntropy, gain;
int bestIndex = -1, numCutPoints = 0;
double numInstances = 0;
//Compute number of instances in set
if ((lastPlusOne - first) < 2) {
return null;
}
//Compute class counts.
counts = new double[2][instances.numClasses()];
for (int i = first; i < lastPlusOne; i++){
numInstances += instances.instance(i).weight();
            //e.g., an instance with a weight of 2 corresponds to two identical instances with a weight of 1 (instance weights are more flexible, though, because they don't need to be integers)
            //note that any instance without a weight value specified is assumed to have a weight of 1 for backwards compatibility
counts[1][(int) instances.instance(i).classValue()] += instances.instance(i).weight();
}
System.out.println("Method cutPointsForSubset num of instances: "+numInstances);
//Save prior counts
priorCounts = new double[instances.numClasses()];
        System.arraycopy(counts[1], 0, priorCounts, 0, instances.numClasses()); //copy elements from counts[1] to priorCounts
// Entropy of the full set
priorEntropy = ContingencyTables.entropy(priorCounts);
bestEntropy = priorEntropy;
//Find best entropy.
bestCounts = new double[2][instances.numClasses()];
for (int i = first; i < (lastPlusOne - 1); i++){
counts[0][(int) instances.instance(i).classValue()] += instances.instance(i).weight();
counts[1][(int) instances.instance(i).classValue()] -= instances.instance(i).weight();
if (instances.instance(i).value(attIndex) < instances.instance(i + 1).value(attIndex)){
currentCutPoint = (instances.instance(i).value(attIndex) + instances.instance(i + 1).value(attIndex)) / 2.0;
currentEntropy = ContingencyTables.entropyConditionedOnRows(counts);
if (currentEntropy < bestEntropy){
bestCutPoint = currentCutPoint;
bestEntropy = currentEntropy;
bestIndex = i;
System.arraycopy(counts[0], 0, bestCounts[0], 0,instances.numClasses());
System.arraycopy(counts[1], 0, bestCounts[1], 0,instances.numClasses());
}
numCutPoints++;
}
}
//Use worse encoding?
if (!m_UseBetterEncoding) {
numCutPoints = (lastPlusOne - first) - 1;
}
//Checks if gain is zero
gain = priorEntropy - bestEntropy;
if (gain <= 0) {
return null;
}
//Check if split is to be accepted
if((m_UseKononenko && kononenkosMDL(priorCounts, bestCounts, numInstances,numCutPoints)) || (!m_UseKononenko && fayyadAndIranisMDL(priorCounts, bestCounts, numInstances, numCutPoints))){
//Select split points for the left and right subsets
left = cutPointsForSubset(instances, attIndex, first, bestIndex + 1);
right = cutPointsForSubset(instances, attIndex, bestIndex + 1, lastPlusOne);
//Merge cutpoints and return them
            if ((left == null) && (right == null)){
cutPoints = new double[1];
cutPoints[0] = bestCutPoint;
}
else if (right == null) {
cutPoints = new double[left.length + 1];
System.arraycopy(left, 0, cutPoints, 0, left.length);
cutPoints[left.length] = bestCutPoint;
}
else if (left == null) {
cutPoints = new double[1 + right.length];
cutPoints[0] = bestCutPoint;
System.arraycopy(right, 0, cutPoints, 1, right.length);
}
else {
cutPoints = new double[left.length + right.length + 1];
System.arraycopy(left, 0, cutPoints, 0, left.length);
cutPoints[left.length] = bestCutPoint;
System.arraycopy(right, 0, cutPoints, left.length + 1, right.length);
}
return cutPoints;
}
else {
return null;
}
}
/**
* Test using Kononenko's MDL criterion.
*
* @param priorCounts
* @param bestCounts
* @param numInstances
* @param numCutPoints
* @return true if the split is acceptable
*/
private boolean kononenkosMDL(double[] priorCounts, double[][] bestCounts, double numInstances, int numCutPoints){
double distPrior, instPrior, distAfter = 0, sum, instAfter = 0;
double before, after;
int numClassesTotal;
        //Number of classes occurring in the set
numClassesTotal = 0;
for (double priorCount : priorCounts) {
if (priorCount > 0) {
numClassesTotal++;
}
}
//Encode distribution prior to split
distPrior = SpecialFunctions.log2Binomial(numInstances + numClassesTotal - 1, numClassesTotal - 1);
//Encode instances prior to split.
instPrior = SpecialFunctions.log2Multinomial(numInstances, priorCounts);
before = instPrior + distPrior;
//Encode distributions and instances after split.
for (double[] bestCount : bestCounts) {
sum = Utils.sum(bestCount);
distAfter += SpecialFunctions.log2Binomial(sum + numClassesTotal - 1, numClassesTotal - 1);
instAfter += SpecialFunctions.log2Multinomial(sum, bestCount);
}
//Coding cost after split
after = Utils.log2(numCutPoints) + distAfter + instAfter;
//Check if split is to be accepted
return (before > after);
}
public double kononenkosMDL(Instances instances,int attIndex){
double distPrior, instPrior, distAfter = 0, sum, instAfter = 0;
double before, after;
int numClassesTotal;
double numInstances = 0;
//table with frequencies for each class - how many instances occur in a particular class
double [] priorCounts=Arrays.stream(instances.attributeStats(instances.classIndex()).nominalCounts).asDoubleStream().toArray(); //we convert because we need in log2Multinomial as parameter double array
        //note: we use opposite indexes (i for attribute values, j for class values) to simplify the later summation ... we can then simply take a row reference from the 2D array -> for (double[] bestCount : matrixCounts)
        //get nominal labels ... better than instances.attributeStats(attIndex).distinctCount because, e.g., for num-of-N we can generate fewer values than are listed in the attribute specification (nominal labels)
String nominalLabels=instances.attribute(attIndex).toString();
nominalLabels=nominalLabels.substring(nominalLabels.lastIndexOf("{"),(nominalLabels.lastIndexOf("}")+1));
int numOfLabels=nominalLabels.split(",").length;
double [][]matrixCounts=new double[numOfLabels][instances.numClasses()];
//instances
/*If the attribute is numeric then the value you get from value() is the actual value. If the attribute is nominal or string, then you get the index of the
actual nominal value returned as a double. The Attribute object of the attribute in question can give you the value (as a String) corresponding to the index. */
for (int i = 0; i < instances.numInstances(); i++){
matrixCounts[(int)instances.instance(i).value(attIndex)][(int)instances.instance(i).classValue()]++;
numInstances += instances.instance(i).weight();
}
        // Number of classes occurring in the set
numClassesTotal = instances.numClasses();
//Encode distribution prior to split
distPrior = SpecialFunctions.log2Binomial(numInstances + numClassesTotal - 1, numClassesTotal - 1);
//Encode instances prior to split.
instPrior = SpecialFunctions.log2Multinomial(numInstances, priorCounts);
before = instPrior + distPrior;
for (double[] bestCount : matrixCounts) {
            sum = Utils.sum(bestCount); //Utils.sum: sum of all numbers in an array
distAfter += SpecialFunctions.log2Binomial(sum + numClassesTotal - 1, numClassesTotal - 1);
instAfter += SpecialFunctions.log2Multinomial(sum, bestCount);
}
after = distAfter + instAfter;
return (before-after)/numInstances;
}
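    //illustrative sketch added for exposition (not part of the original code; the method name is an
    //assumption, and the class attribute is assumed to be the last one, as elsewhere in this project):
    //rank all nominal attributes of a dataset by the normalised MDL score computed above; higher
    //scores indicate more informative attributes (instances.setClassIndex(...) must already be set)
    public double[] mdlScores(Instances instances){
        double[] scores = new double[instances.numAttributes() - 1];
        for (int i = 0; i < scores.length; i++)
            if (instances.attribute(i).isNominal())
                scores[i] = kononenkosMDL(instances, i);
        return scores;
    }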
/**
* Test using Fayyad and Irani's MDL criterion.
*
* @param priorCounts
* @param bestCounts
* @param numInstances
* @param numCutPoints
     * @return true if the split is acceptable
*/
private boolean fayyadAndIranisMDL(double[] priorCounts,double[][] bestCounts, double numInstances, int numCutPoints) {
double priorEntropy, entropy, gain;
double entropyLeft, entropyRight, delta;
int numClassesTotal, numClassesRight, numClassesLeft;
// Compute entropy before split.
priorEntropy = ContingencyTables.entropy(priorCounts);
// Compute entropy after split.
entropy = ContingencyTables.entropyConditionedOnRows(bestCounts);
// Compute information gain.
gain = priorEntropy - entropy;
        // Number of classes occurring in the set
numClassesTotal = 0;
for (double priorCount : priorCounts) {
if (priorCount > 0) {
numClassesTotal++;
}
}
        // Number of classes occurring in the left subset
numClassesLeft = 0;
for (int i = 0; i < bestCounts[0].length; i++) {
if (bestCounts[0][i] > 0) {
numClassesLeft++;
}
}
        // Number of classes occurring in the right subset
numClassesRight = 0;
for (int i = 0; i < bestCounts[1].length; i++) {
if (bestCounts[1][i] > 0) {
numClassesRight++;
}
}
// Entropy of the left and the right subsets
entropyLeft = ContingencyTables.entropy(bestCounts[0]);
entropyRight = ContingencyTables.entropy(bestCounts[1]);
// Compute terms for MDL formula
delta = Utils.log2(Math.pow(3, numClassesTotal) - 2) - ((numClassesTotal * priorEntropy) - (numClassesRight * entropyRight) - (numClassesLeft * entropyLeft));
// Check if split is to be accepted
return (gain > (Utils.log2(numCutPoints) + delta) / numInstances);
}
public double impuritySplit(Instances data, int idx){
double bestEstimation;
ArrayList<SortRec> sortedAttr = new ArrayList<SortRec>();
int noAttrVal[]=new int[3];
int noClasses=data.numClasses();
        System.out.println("noClasses: "+noClasses);
int noClassAttrVal[][]=new int[noClasses+1][2+1];
int trainSize=data.numInstances();
int numOfMissing=data.attributeStats(idx).missingCount;
sortedAttr=initArrSortRec(trainSize-numOfMissing);
int j ;
int OKvalues = 0 ;
double attrValue ;
for (j=0; j<trainSize;j++){
attrValue=data.instance(j).value(idx);
            if (Double.isNaN(attrValue)) //skip instances with a missing value of the attribute
continue;
sortedAttr.get(OKvalues).setKey(attrValue);
sortedAttr.get(OKvalues).setValue(j);
noClassAttrVal[(int) data.instance(j).classValue()][2]++;
OKvalues++ ;
}
        if (OKvalues <= 1){ //all the cases have a missing value of the attribute, or only one is valid
return - Double.MAX_VALUE; //smaller than any value, so all examples will go into one branch
}
double[][] counts;
double[] priorCounts;
double priorEntropy;
double numInstances = 0;
//Compute class counts.
counts = new double[2][data.numClasses()];
for (int i = 0; i < trainSize; i++){
numInstances += data.instance(i).weight();
            //e.g., an instance with a weight of 2 corresponds to two identical instances with a weight of 1 (instance weights are more flexible, though, because they don't need to be integers)
            //note that any instance without a weight value specified is assumed to have a weight of 1 for backwards compatibility
counts[1][(int) data.instance(i).classValue()] += data.instance(i).weight();
}
//Save prior counts
priorCounts = new double[data.numClasses()];
        System.arraycopy(counts[1], 0, priorCounts, 0, data.numClasses()); //copy elements from counts[1] to priorCounts
//Entropy of the full set
priorEntropy = ContingencyTables.entropy(priorCounts);
double priorImpurity=priorEntropy;
Collections.sort(sortedAttr);
bestEstimation = - Double.MAX_VALUE ;
double est = 0, splitValue = - Double.MAX_VALUE ; //smaller than any value, so all examples will go into one branch
//initially we move some left instance from right to left
int minNodeWeightEst = 2; //minNodeWeightEst (minimal split to consider in attribute evaluation) should be non-negative ... minimal split to be evaluated
        for (j=0 ; j < minNodeWeightEst ; j++) {
            int instIdx=(int)sortedAttr.get(j).getValue(); //map the sorted position back to the original instance index (getValue() is assumed to return the index stored via setValue above)
            noClassAttrVal[(int) data.instance(instIdx).classValue()][1]++; //increase on the left
            noClassAttrVal[(int) data.instance(instIdx).classValue()][2]--; //decrease on the right
        }
int upperLimit = OKvalues - minNodeWeightEst;
for ( ; j < upperLimit ; j++){
            //only estimate for unique values
            if(sortedAttr.get(j).getKey()!=sortedAttr.get(j-1).getKey()){
                //compute heuristic measure
                noAttrVal[1] = j ;
                noAttrVal[2] = OKvalues - j ;
                //build the left/right class-count matrix from the incremental bookkeeping in noClassAttrVal
                counts = new double[2][noClasses];
                for (int c = 0; c < noClasses; c++){
                    counts[0][c] = noClassAttrVal[c][1];
                    counts[1][c] = noClassAttrVal[c][2];
                }
                est = priorImpurity - ContingencyTables.entropyConditionedOnRows(counts); //information gain of the candidate split
                if (est > bestEstimation){
                    bestEstimation = est ;
                    splitValue=(sortedAttr.get(j).getKey() + sortedAttr.get(j-1).getKey()) / 2.0;
                    System.out.println("Split value internal "+ splitValue);
                }
            }
            int instIdx=(int)sortedAttr.get(j).getValue(); //map the sorted position back to the original instance index
            noClassAttrVal[(int) data.instance(instIdx).classValue()][1]++; //increase on the left
            noClassAttrVal[(int) data.instance(instIdx).classValue()][2]--; //decrease on the right
}
return splitValue ;
}
public static ArrayList<SortRec> initArrSortRec(int size){
ArrayList<SortRec> tmp=new ArrayList<SortRec>(size);
for(int i=0;i<size;i++){
tmp.add(i, new SortRec());
}
return tmp;
}
}
| 21,953 | 38.84392 | 215 | java |
featConstr | featConstr-master/src/featconstr/ModelAndAcc.java | package featconstr;
import weka.classifiers.Classifier;
/**
*
* @author bostjan
*/
public class ModelAndAcc {
private Classifier model;
private double acc; //accuracy
public ModelAndAcc(){
}
public ModelAndAcc(Classifier model, double acc){
this.model=model;
this.acc=acc;
}
public void setClassifier(Classifier model){
this.model=model;
}
public void setAcc(double acc){
this.acc=acc;
}
public Classifier getClassifier(){
return model;
}
public double getAcc(){
return acc;
}
}
| 592 | 16.969697 | 56 | java |
featConstr | featConstr-master/src/featconstr/OptionWithType.java | package featconstr;
import weka.core.Option;
import java.io.Serializable;
public class OptionWithType extends Option implements Serializable {
private static final long serialVersionUID = -251269918815312469L;
public enum ArgType{STRING, DOUBLE, INTEGER}
private final ArgType type;
/**
* Creates new option with the given parameters.
*
* @param description the option's description
* @param name the option's name
* @param numArguments the number of arguments
* @param synopsis the option's synopsis
* @param type the option's arg type
*/
public OptionWithType(String description, String name, int numArguments, String synopsis, ArgType type) {
super(description, name, numArguments, synopsis);
this.type = type;
}
public ArgType type(){
return type;
}
}
| 877 | 29.275862 | 109 | java |
featConstr | featConstr-master/src/featconstr/ParamSearchEval.java | package featconstr;
/**
*
* @author bostjan
*/
public class ParamSearchEval{
private double acc;
private int feat[]=new int[6]; //0-logical, 1-threshold, 2-furia, 3-cartesian, 4-relational, 5-numerical
    private int tree[]=new int[3]; //0-tree size, 1-number of leaves, 2-sum of constructs
private int complexityFuria[]=new int[2]; //0-number of rules, 1-sum of constructs
private int numOfLogFeatInTree[]=new int[2]; //0-number of logical feat, 1-sum of constructs
private int numOfCartesian[]=new int[2]; //0-number of cartesian features in tree, 1-sum of constructs (cartesian features) in tree
private int numOfRelational[]=new int[2]; //0-number of relational features in tree, 1-sum of constructs (relational feat) in tree
private int numOfNumerical[]=new int[2]; //0-number of numerical features in tree, 1-sum of constructs (numerical feat) in tree
    private int furiaThrInTree[]=new int[4]; //0-number of Furia feat, 1-sum of constructs in Furia feat, 2-number of Thr feat, 3-sum of constructs in Thr
private long time[]=new long[2]; //0-feature construction time, 1-learning time
public ParamSearchEval(){
}
public ParamSearchEval(double acc, int feat[], int tree[], int complexityFuria[], int furiaThrInTree[]){
this.acc=acc;
this.feat=feat;
this.tree=tree;
this.complexityFuria=complexityFuria;
this.furiaThrInTree=furiaThrInTree;
}
//setter methods
public void setAcc(double acc){
this.acc=acc;
}
public void setFeat(int feat[]){
this.feat=feat;
}
public void setTree(int tree[]){
this.tree=tree;
}
public void setComplexityFuria(int complexityFuria[]){
this.complexityFuria=complexityFuria;
}
public void setFuriaThrComplx(int furiaThrInTree[]){
this.furiaThrInTree=furiaThrInTree;
}
public void setTime(long time[]){
this.time=time;
}
public void setNumLogFeatInTree(int numOfLogFeatInTree[]){
this.numOfLogFeatInTree=numOfLogFeatInTree;
}
public void setCartFeatInTree(int numOfCartesian[]){
this.numOfCartesian=numOfCartesian;
}
public void setRelFeatInTree(int numOfRelational[]){
this.numOfRelational=numOfRelational;
}
public void setNumFeatInTree(int numOfNumerical[]){
this.numOfNumerical=numOfNumerical;
}
//getter methods
public double getAcc(){
return acc;
}
public int[] getFeat(){
return feat;
}
public int[] getTree(){
return tree;
}
public int[] getComplexityFuria(){
return complexityFuria;
}
public int[] getFuriaThrComplx(){
return furiaThrInTree;
}
public long[] getTime(){
return time;
}
public int[] getNumLogFeatInTree(){
return numOfLogFeatInTree;
}
public int[] getCartFeatInTree(){
return numOfCartesian;
}
public int[] getRelFeatInTree(){
return numOfRelational;
}
public int[] getNumFeatInTree(){
return numOfNumerical;
}
}
| 3,187 | 33.652174 | 160 | java |
featConstr | featConstr-master/src/featconstr/Parameters.java | package featconstr;
/**
*
* @author bostjan
*/
public class Parameters{
private int numOfAttr;
private double acc;
private String evalMeth;
public Parameters( double acc, String evalMeth, int numOfAttr) {
this.acc = acc;
this.evalMeth = evalMeth;
this.numOfAttr=numOfAttr;
}
public double getAcc(){
return acc;
}
public int getNumOfAttr(){
return numOfAttr;
}
public String getEvalMeth(){
return evalMeth;
}
}
| 509 | 16 | 67 | java |
featConstr | featConstr-master/src/featconstr/ReliefFcalcDistOnOrigAttr.java | /*
* ReliefFcalcDistOnOrigAttr.java is a variant of ReliefFAttributeEval.java
* from Weka package.
* This variant of ReliefF only considers the attributes for calculating the
* distance and performs the rest of the functionality on attributes and
* features.
*
*/
package featconstr;
import java.util.Enumeration;
import java.util.Random;
import java.util.Vector;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.attributeSelection.ASEvaluation; //added by BV
import weka.attributeSelection.AttributeEvaluator; //added by BV
/**
 * <!-- globalinfo-start --> ReliefFcalcDistOnOrigAttr :<br/>
* <br/>
* Evaluates the worth of an attribute by repeatedly sampling an instance and
* considering the value of the given attribute for the nearest instance of the
* same and different class. Can operate on both discrete and continuous class
* data.<br/>
* <br/>
* For more information see:<br/>
* <br/>
* Kenji Kira, Larry A. Rendell: A Practical Approach to Feature Selection. In:
* Ninth International Workshop on Machine Learning, 249-256, 1992.<br/>
* <br/>
* Igor Kononenko: Estimating Attributes: Analysis and Extensions of RELIEF. In:
* European Conference on Machine Learning, 171-182, 1994.<br/>
* <br/>
* Marko Robnik-Sikonja, Igor Kononenko: An adaptation of Relief for attribute
* estimation in regression. In: Fourteenth International Conference on Machine
* Learning, 296-304, 1997.
* <p/>
* <!-- globalinfo-end -->
*
* <!-- technical-bibtex-start --> BibTeX:
*
* <pre>
* @inproceedings{Kira1992,
* author = {Kenji Kira and Larry A. Rendell},
* booktitle = {Ninth International Workshop on Machine Learning},
* editor = {Derek H. Sleeman and Peter Edwards},
* pages = {249-256},
* publisher = {Morgan Kaufmann},
* title = {A Practical Approach to Feature Selection},
* year = {1992}
* }
*
* @inproceedings{Kononenko1994,
* author = {Igor Kononenko},
* booktitle = {European Conference on Machine Learning},
* editor = {Francesco Bergadano and Luc De Raedt},
* pages = {171-182},
* publisher = {Springer},
* title = {Estimating Attributes: Analysis and Extensions of RELIEF},
* year = {1994}
* }
*
* @inproceedings{Robnik-Sikonja1997,
* author = {Marko Robnik-Sikonja and Igor Kononenko},
* booktitle = {Fourteenth International Conference on Machine Learning},
* editor = {Douglas H. Fisher},
* pages = {296-304},
* publisher = {Morgan Kaufmann},
* title = {An adaptation of Relief for attribute estimation in regression},
* year = {1997}
* }
* </pre>
* <p/>
* <!-- technical-bibtex-end -->
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -M <num instances>
* Specify the number of instances to
* sample when estimating attributes.
* If not specified, then all instances
* will be used.
* </pre>
*
* <pre>
* -D <seed>
* Seed for randomly sampling instances.
* (Default = 1)
* </pre>
*
* <pre>
* -K <number of neighbours>
* Number of nearest neighbours (k) used
* to estimate attribute relevances
* (Default = 10).
* </pre>
*
* <pre>
* -W
* Weight nearest neighbours by distance
* </pre>
*
* <pre>
* -A <num>
* Specify sigma value (used in an exp
* function to control how quickly
* weights for more distant instances
* decrease. Use in conjunction with -W.
* Sensible value=1/5 to 1/10 of the
* number of nearest neighbours.
* (Default = 2)
* </pre>
*
* <!-- options-end -->
*
* @author Mark Hall ([email protected])
* @version $Revision$
*/
public class ReliefFcalcDistOnOrigAttr extends ASEvaluation implements
AttributeEvaluator, OptionHandler, TechnicalInformationHandler {
/** for serialization */
static final long serialVersionUID = -8422186665795839379L;
/** The training instances */
private Instances m_trainInstances, m_trainInstancesOrig; //m_trainInstancesOrig added by BV
/** The class index */
private int m_classIndex, m_classIndexOrigData;
/** The number of attributes */
private int m_numAttribs;
/** The number of instances */
private int m_numInstances;
/** Numeric class */
private boolean m_numericClass;
/** The number of classes if class is nominal */
private int m_numClasses;
/**
* Used to hold the probability of a different class val given nearest
* instances (numeric class)
*/
private double m_ndc;
/**
* Used to hold the prob of different value of an attribute given nearest
* instances (numeric class case)
*/
private double[] m_nda;
/**
* Used to hold the prob of a different class val and different att val given
* nearest instances (numeric class case)
*/
private double[] m_ndcda;
/** Holds the weights that relief assigns to attributes */
private double[] m_weights;
/** Prior class probabilities (discrete class case) */
private double[] m_classProbs;
/**
* The number of instances to sample when estimating attributes default == -1,
* use all instances
*/
private int m_sampleM;
/** The number of nearest hits/misses */
private int m_Knn;
/** k nearest scores + instance indexes for n classes */
private double[][][] m_karray;
/** Upper bound for numeric attributes */
private double[] m_maxArray;
/** Lower bound for numeric attributes */
private double[] m_minArray;
/** Keep track of the farthest instance for each class */
private double[] m_worst;
/** Index in the m_karray of the farthest instance for each class */
private int[] m_index;
/** Number of nearest neighbours stored of each class */
private int[] m_stored;
/** Random number seed used for sampling instances */
private int m_seed;
/**
* used to (optionally) weight nearest neighbours by their distance from the
* instance in question. Each entry holds exp(-((rank(r_i, i_j)/sigma)^2))
* where rank(r_i,i_j) is the rank of instance i_j in a sequence of instances
* ordered by the distance from r_i. sigma is a user defined parameter,
* default=20
**/
private double[] m_weightsByRank;
private int m_sigma;
/** Weight by distance rather than equal weights */
private boolean m_weightByDistance;
/** Consider constructive induction - calculate distance only on (original) attributes */
private boolean considerCI=false; //added by BV
/**
* Constructor
*/
public ReliefFcalcDistOnOrigAttr() {
resetOptions();
}
public ReliefFcalcDistOnOrigAttr(Instances origData, Instances afterCIdata) { //added by BV
/*reset options*/
resetOptions();
considerCI=true;
afterCIdata.setClassIndex(afterCIdata.numAttributes()-1);
origData.setClassIndex(origData.numAttributes()-1);
m_trainInstances = afterCIdata;
m_trainInstancesOrig = origData;
}
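  //note: the constructor above only stores the two datasets and enables considerCI;
  //buildEvaluator(...) must still be called before evaluateAttribute(...) returns meaningful weights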
/**
* Returns a string describing this attribute evaluator
*
* @return a description of the evaluator suitable for displaying in the
* explorer/experimenter gui
*/
public String globalInfo() {
return "ReliefFAttributeEval :\n\nEvaluates the worth of an attribute by "
+ "repeatedly sampling an instance and considering the value of the "
+ "given attribute for the nearest instance of the same and different "
+ "class. Can operate on both discrete and continuous class data.\n\n"
+ "For more information see:\n\n" + getTechnicalInformation().toString();
}
/**
* Returns an instance of a TechnicalInformation object, containing detailed
* information about the technical background of this class, e.g., paper
* reference or book this class is based on.
*
* @return the technical information about this class
*/
@Override
public TechnicalInformation getTechnicalInformation() {
TechnicalInformation result;
TechnicalInformation additional;
result = new TechnicalInformation(TechnicalInformation.Type.INPROCEEDINGS);
result.setValue(TechnicalInformation.Field.AUTHOR, "Kenji Kira and Larry A. Rendell");
result.setValue(TechnicalInformation.Field.TITLE, "A Practical Approach to Feature Selection");
result.setValue(TechnicalInformation.Field.BOOKTITLE,
"Ninth International Workshop on Machine Learning");
result.setValue(TechnicalInformation.Field.EDITOR, "Derek H. Sleeman and Peter Edwards");
result.setValue(TechnicalInformation.Field.YEAR, "1992");
result.setValue(TechnicalInformation.Field.PAGES, "249-256");
result.setValue(TechnicalInformation.Field.PUBLISHER, "Morgan Kaufmann");
additional = result.add(TechnicalInformation.Type.INPROCEEDINGS);
additional.setValue(TechnicalInformation.Field.AUTHOR, "Igor Kononenko");
additional.setValue(TechnicalInformation.Field.TITLE,
"Estimating Attributes: Analysis and Extensions of RELIEF");
additional.setValue(TechnicalInformation.Field.BOOKTITLE,
"European Conference on Machine Learning");
additional.setValue(TechnicalInformation.Field.EDITOR, "Francesco Bergadano and Luc De Raedt");
additional.setValue(TechnicalInformation.Field.YEAR, "1994");
additional.setValue(TechnicalInformation.Field.PAGES, "171-182");
additional.setValue(TechnicalInformation.Field.PUBLISHER, "Springer");
additional = result.add(TechnicalInformation.Type.INPROCEEDINGS);
additional
.setValue(TechnicalInformation.Field.AUTHOR, "Marko Robnik-Sikonja and Igor Kononenko");
additional.setValue(TechnicalInformation.Field.TITLE,
"An adaptation of Relief for attribute estimation in regression");
additional.setValue(TechnicalInformation.Field.BOOKTITLE,
"Fourteenth International Conference on Machine Learning");
additional.setValue(TechnicalInformation.Field.EDITOR, "Douglas H. Fisher");
additional.setValue(TechnicalInformation.Field.YEAR, "1997");
additional.setValue(TechnicalInformation.Field.PAGES, "296-304");
additional.setValue(TechnicalInformation.Field.PUBLISHER, "Morgan Kaufmann");
return result;
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
**/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(4);
newVector.addElement(new Option("\tSpecify the number of instances to\n"
+ "\tsample when estimating attributes.\n"
+ "\tIf not specified, then all instances\n" + "\twill be used.", "M", 1,
"-M <num instances>"));
newVector.addElement(new Option("\tSeed for randomly sampling instances.\n"
+ "\t(Default = 1)", "D", 1, "-D <seed>"));
newVector.addElement(new Option("\tNumber of nearest neighbours (k) used\n"
+ "\tto estimate attribute relevances\n" + "\t(Default = 10).", "K", 1,
"-K <number of neighbours>"));
newVector.addElement(new Option("\tWeight nearest neighbours by distance",
"W", 0, "-W"));
newVector.addElement(new Option("\tSpecify sigma value (used in an exp\n"
+ "\tfunction to control how quickly\n"
+ "\tweights for more distant instances\n"
+ "\tdecrease. Use in conjunction with -W.\n"
+ "\tSensible value=1/5 to 1/10 of the\n"
+ "\tnumber of nearest neighbours.\n" + "\t(Default = 2)", "A", 1,
"-A <num>"));
return newVector.elements();
}
/**
* Parses a given list of options.
* <p/>
*
* <!-- options-start --> Valid options are:
* <p/>
*
* <pre>
* -M <num instances>
* Specify the number of instances to
* sample when estimating attributes.
* If not specified, then all instances
* will be used.
* </pre>
*
* <pre>
* -D <seed>
* Seed for randomly sampling instances.
* (Default = 1)
* </pre>
*
* <pre>
* -K <number of neighbours>
* Number of nearest neighbours (k) used
* to estimate attribute relevances
* (Default = 10).
* </pre>
*
* <pre>
* -W
* Weight nearest neighbours by distance
* </pre>
*
* <pre>
* -A <num>
* Specify sigma value (used in an exp
* function to control how quickly
* weights for more distant instances
* decrease. Use in conjunction with -W.
* Sensible value=1/5 to 1/10 of the
* number of nearest neighbours.
* (Default = 2)
* </pre>
*
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
String optionString;
resetOptions();
setWeightByDistance(Utils.getFlag('W', options));
optionString = Utils.getOption('M', options);
if (optionString.length() != 0) {
setSampleSize(Integer.parseInt(optionString));
}
optionString = Utils.getOption('D', options);
if (optionString.length() != 0) {
setSeed(Integer.parseInt(optionString));
}
optionString = Utils.getOption('K', options);
if (optionString.length() != 0) {
setNumNeighbours(Integer.parseInt(optionString));
}
optionString = Utils.getOption('A', options);
if (optionString.length() != 0) {
setWeightByDistance(true); // turn on weighting by distance
setSigma(Integer.parseInt(optionString));
}
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String sigmaTipText() {
return "Set influence of nearest neighbours. Used in an exp function to "
+ "control how quickly weights decrease for more distant instances. "
+ "Use in conjunction with weightByDistance. Sensible values = 1/5 to "
+ "1/10 the number of nearest neighbours.";
}
/**
* Sets the sigma value.
*
* @param s the value of sigma (> 0)
* @throws Exception if s is not positive
*/
public void setSigma(int s) throws Exception {
if (s <= 0) {
throw new Exception("value of sigma must be > 0!");
}
m_sigma = s;
}
/**
* Get the value of sigma.
*
* @return the sigma value.
*/
public int getSigma() {
return m_sigma;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String numNeighboursTipText() {
return "Number of nearest neighbours for attribute estimation.";
}
/**
* Set the number of nearest neighbours
*
* @param n the number of nearest neighbours.
*/
public void setNumNeighbours(int n) {
m_Knn = n;
}
/**
* Get the number of nearest neighbours
*
* @return the number of nearest neighbours
*/
public int getNumNeighbours() {
return m_Knn;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String seedTipText() {
return "Random seed for sampling instances.";
}
/**
* Set the random number seed for randomly sampling instances.
*
* @param s the random number seed.
*/
public void setSeed(int s) {
m_seed = s;
}
/**
* Get the seed used for randomly sampling instances.
*
* @return the random number seed.
*/
public int getSeed() {
return m_seed;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String sampleSizeTipText() {
return "Number of instances to sample. Default (-1) indicates that all "
+ "instances will be used for attribute estimation.";
}
/**
* Set the number of instances to sample for attribute estimation
*
* @param s the number of instances to sample.
*/
public void setSampleSize(int s) {
m_sampleM = s;
}
/**
* Get the number of instances used for estimating attributes
*
* @return the number of instances.
*/
public int getSampleSize() {
return m_sampleM;
}
/**
* Returns the tip text for this property
*
* @return tip text for this property suitable for displaying in the
* explorer/experimenter gui
*/
public String weightByDistanceTipText() {
return "Weight nearest neighbours by their distance.";
}
/**
* Set the nearest neighbour weighting method
*
* @param b true nearest neighbours are to be weighted by distance.
*/
public void setWeightByDistance(boolean b) {
m_weightByDistance = b;
}
/**
* Get whether nearest neighbours are being weighted by distance
*
* @return m_weightByDiffernce
*/
public boolean getWeightByDistance() {
return m_weightByDistance;
}
/**
   * Gets the current settings of ReliefFcalcDistOnOrigAttr.
*
* @return an array of strings suitable for passing to setOptions()
*/
@Override
public String[] getOptions() {
Vector<String> options = new Vector<String>();
if (getWeightByDistance()) {
options.add("-W");
}
options.add("-M");
options.add("" + getSampleSize());
options.add("-D");
options.add("" + getSeed());
options.add("-K");
options.add("" + getNumNeighbours());
if (getWeightByDistance()) {
options.add("-A");
options.add("" + getSigma());
}
return options.toArray(new String[0]);
}
/**
* Return a description of the ReliefF attribute evaluator.
*
* @return a description of the evaluator as a String.
*/
@Override
public String toString() {
StringBuffer text = new StringBuffer();
if (m_trainInstances == null) {
text.append("ReliefF feature evaluator has not been built yet\n");
} else {
text.append("\tReliefF Ranking Filter");
text.append("\n\tInstances sampled: ");
if (m_sampleM == -1) {
text.append("all\n");
} else {
text.append(m_sampleM + "\n");
}
text.append("\tNumber of nearest neighbours (k): " + m_Knn + "\n");
if (m_weightByDistance) {
text.append("\tExponentially decreasing (with distance) "
+ "influence for\n" + "\tnearest neighbours. Sigma: " + m_sigma
+ "\n");
} else {
text.append("\tEqual influence nearest neighbours\n");
}
}
return text.toString();
}
/**
* Returns the capabilities of this evaluator.
*
* @return the capabilities of this evaluator
* @see Capabilities
*/
@Override
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities();
result.disableAll();
// attributes
result.enable(Capabilities.Capability.NOMINAL_ATTRIBUTES);
result.enable(Capabilities.Capability.NUMERIC_ATTRIBUTES);
result.enable(Capabilities.Capability.DATE_ATTRIBUTES);
result.enable(Capabilities.Capability.MISSING_VALUES);
// class
result.enable(Capabilities.Capability.NOMINAL_CLASS);
result.enable(Capabilities.Capability.NUMERIC_CLASS);
result.enable(Capabilities.Capability.DATE_CLASS);
result.enable(Capabilities.Capability.MISSING_CLASS_VALUES);
return result;
}
/**
* Initializes a ReliefF attribute evaluator.
*
* @param data set of instances serving as training data
* @throws Exception if the evaluator has not been generated successfully
*/
@Override
public void buildEvaluator(Instances data) throws Exception {
int z, totalInstances;
Random r = new Random(m_seed);
// can evaluator handle data?
getCapabilities().testWithFail(data);
m_trainInstances = data;
m_classIndex = m_trainInstances.classIndex();
if(considerCI) //added by BV
m_classIndexOrigData = m_trainInstancesOrig.classIndex(); //added by BV
m_numAttribs = m_trainInstances.numAttributes();
m_numInstances = m_trainInstances.numInstances();
if (m_trainInstances.attribute(m_classIndex).isNumeric()) {
m_numericClass = true;
} else {
m_numericClass = false;
}
if (!m_numericClass) {
m_numClasses = m_trainInstances.attribute(m_classIndex).numValues();
} else {
m_ndc = 0;
m_numClasses = 1;
m_nda = new double[m_numAttribs];
m_ndcda = new double[m_numAttribs];
}
if (m_weightByDistance) // set up the rank based weights
{
m_weightsByRank = new double[m_Knn];
for (int i = 0; i < m_Knn; i++) {
m_weightsByRank[i] = Math
.exp(-((i / (double) m_sigma) * (i / (double) m_sigma)));
}
}
// the final attribute weights
m_weights = new double[m_numAttribs];
// num classes (1 for numeric class) knn neighbours,
// and 0 = distance, 1 = instance index
m_karray = new double[m_numClasses][m_Knn][2];
if (!m_numericClass) {
m_classProbs = new double[m_numClasses];
for (int i = 0; i < m_numInstances; i++) {
if (!m_trainInstances.instance(i).classIsMissing()) {
m_classProbs[(int) m_trainInstances.instance(i).value(m_classIndex)]++;
}
}
for (int i = 0; i < m_numClasses; i++) {
m_classProbs[i] /= m_numInstances;
}
}
m_worst = new double[m_numClasses];
m_index = new int[m_numClasses];
m_stored = new int[m_numClasses];
m_minArray = new double[m_numAttribs];
m_maxArray = new double[m_numAttribs];
for (int i = 0; i < m_numAttribs; i++) {
m_minArray[i] = m_maxArray[i] = Double.NaN;
}
for (int i = 0; i < m_numInstances; i++) {
updateMinMax(m_trainInstances.instance(i));
}
if ((m_sampleM > m_numInstances) || (m_sampleM < 0)) {
totalInstances = m_numInstances;
} else {
totalInstances = m_sampleM;
}
// process each instance, updating attribute weights
for (int i = 0; i < totalInstances; i++) {
if (totalInstances == m_numInstances) {
z = i;
} else {
z = r.nextInt() % m_numInstances;
}
if (z < 0) {
z *= -1;
}
if (!(m_trainInstances.instance(z).isMissing(m_classIndex))) {
// first clear the knn and worst index stuff for the classes
for (int j = 0; j < m_numClasses; j++) {
m_index[j] = m_stored[j] = 0;
for (int k = 0; k < m_Knn; k++) {
m_karray[j][k][0] = m_karray[j][k][1] = 0;
}
}
findKHitMiss(z);
if (m_numericClass) {
updateWeightsNumericClass(z);
} else {
updateWeightsDiscreteClass(z);
}
}
}
// now scale weights by 1/m_numInstances (nominal class) or
// calculate weights numeric class
// System.out.println("num inst:"+m_numInstances+" r_ndc:"+r_ndc);
for (int i = 0; i < m_numAttribs; i++) {
if (i != m_classIndex) {
if (m_numericClass) {
m_weights[i] = m_ndcda[i] / m_ndc
- ((m_nda[i] - m_ndcda[i]) / (totalInstances - m_ndc));
} else {
m_weights[i] *= (1.0 / totalInstances);
}
}
}
}
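  /*
   * Interpretation of the numeric-class (RReliefF) update above, added for
   * clarity (a sketch, not part of the original source): m_ndc estimates
   * P(different prediction), m_nda[i] estimates P(different value of
   * attribute i), and m_ndcda[i] their joint probability, all over the
   * nearest neighbours. The final weight is then
   *   W[i] = P(diff. value | diff. prediction) - P(diff. value | same prediction),
   * which is the ratio form computed in the scaling loop above.
   */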
  /**
   * Evaluates an individual attribute using ReliefF's instance-based approach.
   * The actual work is done by buildEvaluator, which evaluates all features.
   *
   * @param attribute the index of the attribute to be evaluated
   * @return the computed ReliefF weight (merit) of the attribute
   * @throws Exception if the attribute could not be evaluated
   */
@Override
public double evaluateAttribute(int attribute) throws Exception {
return m_weights[attribute];
}
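  /*
   * Minimal usage sketch (assumes the standard Weka ASEvaluation workflow;
   * the dataset name and variable names are illustrative, not part of this
   * file):
   *
   *   Instances data = new Instances(new java.io.FileReader("iris.arff"));
   *   data.setClassIndex(data.numAttributes() - 1);
   *   ReliefFAttributeEval relief = new ReliefFAttributeEval();
   *   relief.buildEvaluator(data);               // does all the work
   *   for (int a = 0; a < data.numAttributes() - 1; a++)
   *     System.out.println(data.attribute(a).name() + " -> "
   *         + relief.evaluateAttribute(a));
   */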
/**
* Reset options to their default values
*/
protected final void resetOptions() {
m_trainInstances = null;
m_sampleM = -1;
m_Knn = 10;
m_sigma = 2;
m_weightByDistance = false;
m_seed = 1;
}
/**
* Normalizes a given value of a numeric attribute.
*
* @param x the value to be normalized
* @param i the attribute's index
* @return the normalized value
*/
private double norm(double x, int i) {
if (Double.isNaN(m_minArray[i]) || Utils.eq(m_maxArray[i], m_minArray[i])) {
return 0;
} else {
return (x - m_minArray[i]) / (m_maxArray[i] - m_minArray[i]);
}
}
/**
* Updates the minimum and maximum values for all the attributes based on a
* new instance.
*
* @param instance the new instance
*/
private void updateMinMax(Instance instance) {
// for (int j = 0; j < m_numAttribs; j++) {
try {
for (int j = 0; j < instance.numValues(); j++) {
if ((instance.attributeSparse(j).isNumeric())
&& (!instance.isMissingSparse(j))) {
if (Double.isNaN(m_minArray[instance.index(j)])) {
m_minArray[instance.index(j)] = instance.valueSparse(j);
m_maxArray[instance.index(j)] = instance.valueSparse(j);
} else {
if (instance.valueSparse(j) < m_minArray[instance.index(j)]) {
m_minArray[instance.index(j)] = instance.valueSparse(j);
} else {
if (instance.valueSparse(j) > m_maxArray[instance.index(j)]) {
m_maxArray[instance.index(j)] = instance.valueSparse(j);
}
}
}
}
}
} catch (Exception ex) {
System.err.println(ex);
ex.printStackTrace();
}
}
/**
* Computes the difference between two given attribute values.
*/
private double difference(int index, double val1, double val2) {
switch (m_trainInstances.attribute(index).type()) {
case Attribute.NOMINAL:
// If attribute is nominal
if (Utils.isMissingValue(val1) || Utils.isMissingValue(val2)) {
return (1.0 - (1.0 / (m_trainInstances.attribute(index).numValues())));
} else if ((int) val1 != (int) val2) {
return 1;
} else {
return 0;
}
case Attribute.NUMERIC:
// If attribute is numeric
if (Utils.isMissingValue(val1) || Utils.isMissingValue(val2)) {
if (Utils.isMissingValue(val1) && Utils.isMissingValue(val2)) {
return 1;
} else {
double diff;
if (Utils.isMissingValue(val2)) {
diff = norm(val1, index);
} else {
diff = norm(val2, index);
}
if (diff < 0.5) {
diff = 1.0 - diff;
}
return diff;
}
} else {
return Math.abs(norm(val1, index) - norm(val2, index));
}
default:
return 0;
}
}
/**
* Calculates the distance between two instances
*
* @param first the first instance
* @param second the second instance
* @return the distance between the two given instances, between 0 and 1
*/
private double distance(Instance first, Instance second) {
double distance = 0;
int firstI, secondI;
for (int p1 = 0, p2 = 0; p1 < first.numValues() || p2 < second.numValues();) {
if (p1 >= first.numValues()) {
firstI = considerCI ? m_trainInstancesOrig.numAttributes() : m_trainInstances.numAttributes(); //added by BV //orig. firstI = m_trainInstances.numAttributes();
} else {
firstI = first.index(p1);
}
if (p2 >= second.numValues()) {
secondI = considerCI ? m_trainInstancesOrig.numAttributes() : m_trainInstances.numAttributes(); //added by BV //orig. secondI = m_trainInstances.numAttributes();
} else {
secondI = second.index(p2);
}
if (firstI == (considerCI ? m_trainInstancesOrig.classIndex() : m_trainInstances.classIndex())) { //added by BV //orig. if (firstI == m_trainInstances.classIndex()) {
p1++;
continue;
}
if (secondI == (considerCI ? m_trainInstancesOrig.classIndex() : m_trainInstances.classIndex())) { //added by BV //orig. if (secondI == m_trainInstances.classIndex()) {
p2++;
continue;
}
double diff;
if (firstI == secondI) {
diff = difference(firstI, first.valueSparse(p1), second.valueSparse(p2));
p1++;
p2++;
} else if (firstI > secondI) {
diff = difference(secondI, 0, second.valueSparse(p2));
p2++;
} else {
diff = difference(firstI, first.valueSparse(p1), 0);
p1++;
}
// distance += diff * diff;
distance += diff;
}
// return Math.sqrt(distance / m_NumAttributesUsed);
return distance;
}
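  /*
   * Illustrative trace of the sparse merge above (numbers assumed, not from
   * the original source): for dense instances a = (1.0, red) and
   * b = (3.0, blue) with numeric range [0,4], the terms are
   * |norm(1) - norm(3)| = |0.25 - 0.75| = 0.5 for the numeric attribute plus
   * 1 for the differing nominal one, giving distance = 1.5, i.e. a
   * Manhattan-style sum (the squared/Euclidean variant is left commented out
   * above).
   */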
/**
* update attribute weights given an instance when the class is numeric
*
* @param instNum the index of the instance to use when updating weights
*/
private void updateWeightsNumericClass(int instNum) {
int i, j;
double temp, temp2;
int[] tempSorted = null;
double[] tempDist = null;
double distNorm = 1.0;
int firstI, secondI;
Instance inst = m_trainInstances.instance(instNum);
// sort nearest neighbours and set up normalization variable
if (m_weightByDistance) {
tempDist = new double[m_stored[0]];
for (j = 0, distNorm = 0; j < m_stored[0]; j++) {
// copy the distances
tempDist[j] = m_karray[0][j][0];
// sum normalizer
distNorm += m_weightsByRank[j];
}
tempSorted = Utils.sort(tempDist);
}
for (i = 0; i < m_stored[0]; i++) {
// P diff prediction (class) given nearest instances
if (m_weightByDistance) {
temp = difference(
m_classIndex,
inst.value(m_classIndex),
m_trainInstances.instance((int) m_karray[0][tempSorted[i]][1]).value(
m_classIndex));
temp *= (m_weightsByRank[i] / distNorm);
} else {
temp = difference(m_classIndex, inst.value(m_classIndex),
m_trainInstances.instance((int) m_karray[0][i][1])
.value(m_classIndex));
temp *= (1.0 / m_stored[0]); // equal influence
}
m_ndc += temp;
Instance cmp;
cmp = (m_weightByDistance) ? m_trainInstances
.instance((int) m_karray[0][tempSorted[i]][1]) : m_trainInstances
.instance((int) m_karray[0][i][1]);
double temp_diffP_diffA_givNearest = difference(m_classIndex,
inst.value(m_classIndex), cmp.value(m_classIndex));
// now the attributes
for (int p1 = 0, p2 = 0; p1 < inst.numValues() || p2 < cmp.numValues();) {
if (p1 >= inst.numValues()) {
firstI = m_trainInstances.numAttributes();
} else {
firstI = inst.index(p1);
}
if (p2 >= cmp.numValues()) {
secondI = m_trainInstances.numAttributes();
} else {
secondI = cmp.index(p2);
}
if (firstI == m_trainInstances.classIndex()) {
p1++;
continue;
}
if (secondI == m_trainInstances.classIndex()) {
p2++;
continue;
}
temp = 0.0;
temp2 = 0.0;
if (firstI == secondI) {
j = firstI;
temp = difference(j, inst.valueSparse(p1), cmp.valueSparse(p2));
p1++;
p2++;
} else if (firstI > secondI) {
j = secondI;
temp = difference(j, 0, cmp.valueSparse(p2));
p2++;
} else {
j = firstI;
temp = difference(j, inst.valueSparse(p1), 0);
p1++;
}
temp2 = temp_diffP_diffA_givNearest * temp;
// P of different prediction and different att value given
// nearest instances
if (m_weightByDistance) {
temp2 *= (m_weightsByRank[i] / distNorm);
} else {
temp2 *= (1.0 / m_stored[0]); // equal influence
}
m_ndcda[j] += temp2;
// P of different attribute val given nearest instances
if (m_weightByDistance) {
temp *= (m_weightsByRank[i] / distNorm);
} else {
temp *= (1.0 / m_stored[0]); // equal influence
}
m_nda[j] += temp;
}
}
}
/**
* update attribute weights given an instance when the class is discrete
*
* @param instNum the index of the instance to use when updating weights
*/
private void updateWeightsDiscreteClass(int instNum) {
int i, j, k;
int cl;
double temp_diff, w_norm = 1.0;
double[] tempDistClass;
int[] tempSortedClass = null;
double distNormClass = 1.0;
double[] tempDistAtt;
int[][] tempSortedAtt = null;
double[] distNormAtt = null;
int firstI, secondI;
// store the indexes (sparse instances) of non-zero elements
Instance inst = m_trainInstances.instance(instNum);
// get the class of this instance
cl = (int) m_trainInstances.instance(instNum).value(m_classIndex);
// sort nearest neighbours and set up normalization variables
if (m_weightByDistance) {
// do class (hits) first
// sort the distances
tempDistClass = new double[m_stored[cl]];
for (j = 0, distNormClass = 0; j < m_stored[cl]; j++) {
// copy the distances
tempDistClass[j] = m_karray[cl][j][0];
// sum normalizer
distNormClass += m_weightsByRank[j];
}
tempSortedClass = Utils.sort(tempDistClass);
// do misses (other classes)
tempSortedAtt = new int[m_numClasses][1];
distNormAtt = new double[m_numClasses];
for (k = 0; k < m_numClasses; k++) {
if (k != cl) // already done cl
{
// sort the distances
tempDistAtt = new double[m_stored[k]];
for (j = 0, distNormAtt[k] = 0; j < m_stored[k]; j++) {
// copy the distances
tempDistAtt[j] = m_karray[k][j][0];
// sum normalizer
distNormAtt[k] += m_weightsByRank[j];
}
tempSortedAtt[k] = Utils.sort(tempDistAtt);
}
}
}
if (m_numClasses > 2) {
// the amount of probability space left after removing the
// probability of this instance's class value
w_norm = (1.0 - m_classProbs[cl]);
}
// do the k nearest hits of the same class
for (j = 0, temp_diff = 0.0; j < m_stored[cl]; j++) {
Instance cmp;
cmp = (m_weightByDistance) ? m_trainInstances
.instance((int) m_karray[cl][tempSortedClass[j]][1]) : m_trainInstances
.instance((int) m_karray[cl][j][1]);
for (int p1 = 0, p2 = 0; p1 < inst.numValues() || p2 < cmp.numValues();) {
if (p1 >= inst.numValues()) {
firstI = m_trainInstances.numAttributes();
} else {
firstI = inst.index(p1);
}
if (p2 >= cmp.numValues()) {
secondI = m_trainInstances.numAttributes();
} else {
secondI = cmp.index(p2);
}
if (firstI == m_trainInstances.classIndex()) {
p1++;
continue;
}
if (secondI == m_trainInstances.classIndex()) {
p2++;
continue;
}
if (firstI == secondI) {
i = firstI;
temp_diff = difference(i, inst.valueSparse(p1), cmp.valueSparse(p2));
p1++;
p2++;
} else if (firstI > secondI) {
i = secondI;
temp_diff = difference(i, 0, cmp.valueSparse(p2));
p2++;
} else {
i = firstI;
temp_diff = difference(i, inst.valueSparse(p1), 0);
p1++;
}
if (m_weightByDistance) {
temp_diff *= (m_weightsByRank[j] / distNormClass);
} else {
if (m_stored[cl] > 0) {
temp_diff /= m_stored[cl];
}
}
m_weights[i] -= temp_diff;
}
}
// now do k nearest misses from each of the other classes
temp_diff = 0.0;
for (k = 0; k < m_numClasses; k++) {
if (k != cl) // already done cl
{
for (j = 0; j < m_stored[k]; j++) {
Instance cmp;
cmp = (m_weightByDistance) ? m_trainInstances
.instance((int) m_karray[k][tempSortedAtt[k][j]][1])
: m_trainInstances.instance((int) m_karray[k][j][1]);
for (int p1 = 0, p2 = 0; p1 < inst.numValues()
|| p2 < cmp.numValues();) {
if (p1 >= inst.numValues()) {
firstI = m_trainInstances.numAttributes();
} else {
firstI = inst.index(p1);
}
if (p2 >= cmp.numValues()) {
secondI = m_trainInstances.numAttributes();
} else {
secondI = cmp.index(p2);
}
if (firstI == m_trainInstances.classIndex()) {
p1++;
continue;
}
if (secondI == m_trainInstances.classIndex()) {
p2++;
continue;
}
if (firstI == secondI) {
i = firstI;
temp_diff = difference(i, inst.valueSparse(p1),
cmp.valueSparse(p2));
p1++;
p2++;
} else if (firstI > secondI) {
i = secondI;
temp_diff = difference(i, 0, cmp.valueSparse(p2));
p2++;
} else {
i = firstI;
temp_diff = difference(i, inst.valueSparse(p1), 0);
p1++;
}
if (m_weightByDistance) {
temp_diff *= (m_weightsByRank[j] / distNormAtt[k]);
} else {
if (m_stored[k] > 0) {
temp_diff /= m_stored[k];
}
}
if (m_numClasses > 2) {
m_weights[i] += ((m_classProbs[k] / w_norm) * temp_diff);
} else {
m_weights[i] += temp_diff;
}
}
}
}
}
}
/**
* Find the K nearest instances to supplied instance if the class is numeric,
* or the K nearest Hits (same class) and Misses (K from each of the other
* classes) if the class is discrete.
*
* @param instNum the index of the instance to find nearest neighbours of
*/
private void findKHitMiss(int instNum) {
int i, j;
int cl;
double ww;
double temp_diff = 0.0;
Instance thisInst = considerCI ? m_trainInstancesOrig.instance(instNum) : m_trainInstances.instance(instNum); //changed by BV //orig. Instance thisInst = m_trainInstances.instance(instNum);
for (i = 0; i < m_numInstances; i++){
if (i != instNum){
Instance cmpInst = considerCI ? m_trainInstancesOrig.instance(i) : m_trainInstances.instance(i); //changed by BV //orig. Instance cmpInst = m_trainInstances.instance(i);
temp_diff = distance(cmpInst, thisInst);
// class of this training instance or 0 if numeric
if (m_numericClass) {
cl = 0;
} else {
if (considerCI ? m_trainInstancesOrig.instance(i).classIsMissing(): m_trainInstances.instance(i).classIsMissing()) { //changed by BV //orig. if (m_trainInstances.instance(i).classIsMissing()) {
// skip instances with missing class values in the nominal class case
continue;
}
cl = considerCI ? (int) m_trainInstancesOrig.instance(i).value(m_classIndexOrigData) : (int) m_trainInstances.instance(i).value(m_classIndex); //changed by BV //orig. cl = (int) m_trainInstances.instance(i).value(m_classIndex);
}
// add this diff to the list for the class of this instance
if (m_stored[cl] < m_Knn) {
m_karray[cl][m_stored[cl]][0] = temp_diff;
m_karray[cl][m_stored[cl]][1] = i;
m_stored[cl]++;
// note the worst diff for this class
for (j = 0, ww = -1.0; j < m_stored[cl]; j++) {
if (m_karray[cl][j][0] > ww) {
ww = m_karray[cl][j][0];
m_index[cl] = j;
}
}
m_worst[cl] = ww;
} else
/*
* if we already have stored knn for this class then check to see if
* this instance is better than the worst
*/
{
if (temp_diff < m_karray[cl][m_index[cl]][0]) {
m_karray[cl][m_index[cl]][0] = temp_diff;
m_karray[cl][m_index[cl]][1] = i;
for (j = 0, ww = -1.0; j < m_stored[cl]; j++) {
if (m_karray[cl][j][0] > ww) {
ww = m_karray[cl][j][0];
m_index[cl] = j;
}
}
m_worst[cl] = ww;
}
}
}
}
}
/**
* Returns the revision string.
*
* @return the revision
*/
@Override
public String getRevision() {
return RevisionUtils.extract("$Revision$");
}
@Override
public int[] postProcess(int[] attributeSet) {
// save memory
m_trainInstances = new Instances(m_trainInstances, 0);
return attributeSet;
}
}
| 41,124 | 29.64456 | 241 | java |
featConstr | featConstr-master/src/featconstr/RobniksMSE.java | package featconstr;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import weka.core.Instances;
/**
*
* @author bostjan
*/
public class RobniksMSE {
public RobniksMSE(){
}
ArrayList<Double> NumEstimation = new ArrayList<>();
ArrayList<Double> DiscEstimation = new ArrayList<>();
ArrayList<Double> splitPoint = new ArrayList<>();
ArrayList<Integer> discNoValues = new ArrayList<>();
List<List<Integer>> DiscValues = new ArrayList<>();
List<List<Double>> NumValues = new ArrayList<>();
ArrayList<Double> weight = new ArrayList<>();
public int noDiscrete;
public int noNumeric;
double epsilon=1E-7;
public final void mseDev(int contAttrFrom, int contAttrTo, int discAttrFrom, int discAttrTo, Instances data){
int trainSize=data.numInstances();
weight=calculateWeights(data);
int i;
int j;
ArrayList<Double> valueClass = new ArrayList<>();
ArrayList<Double> valueWeight = new ArrayList<>();
ArrayList<Double> squaredValues = new ArrayList<>();
ArrayList<SortRec> sortedMean = new ArrayList<>();
int idx;
int OKvalues;
double totalWeight;
double value;
double bestEstimate;
double estimate;
double pLeft;
double variance;
double LeftValues;
double LeftSquares;
double LeftWeight;
double RightValues;
double RightSquares;
double RightWeight;
for (i = discAttrFrom ; i < discAttrTo ; i++){
            valueClass=new ArrayList<>(Collections.nCopies(discNoValues.get(i)+1, 0.0)); //sized for the value indices 1..numValues used below
            valueWeight=new ArrayList<>(Collections.nCopies(discNoValues.get(i)+1, 0.0));
            squaredValues=new ArrayList<>(Collections.nCopies(discNoValues.get(i)+1, 0.0));
for (j = 0 ; j < trainSize ; j++){
idx = DiscValues.get(j).get(i);
value = NumValues.get(j).get(0);
valueClass.set(idx, valueClass.get(idx)+ weight.get(j) * value);
valueWeight.set(idx, valueWeight.get(idx) + weight.get(j));
squaredValues.set(idx, squaredValues.get(idx)+weight.get(j)*value*value);
}
            sortedMean=initArrSortRec(discNoValues.get(i)); //pre-filled with empty records so setKey/setValue below cannot hit an empty slot
RightWeight = RightSquares = RightValues = 0.0;
OKvalues = 0;
for (j = 1 ; j <= discNoValues.get(i); j++){
if (valueWeight.get(j) > epsilon){
sortedMean.get(OKvalues).setKey(valueClass.get(j) / valueWeight.get(j));
sortedMean.get(OKvalues).setValue(j);
OKvalues++;
RightWeight +=valueWeight.get(j);
RightSquares += squaredValues.get(j);
RightValues +=valueClass.get(j);
}
}
totalWeight = RightWeight;
Collections.sort(sortedMean);
bestEstimate = Double.MAX_VALUE;
LeftWeight = LeftSquares = LeftValues = 0.0;
int upper = OKvalues - 1;
for (j = 0 ; j < upper ; j++){
idx = sortedMean.get(j).getValue();
LeftSquares += squaredValues.get(idx);
LeftValues +=valueClass.get(idx);
LeftWeight +=valueWeight.get(idx);
RightSquares -=squaredValues.get(idx);
RightValues -=valueClass.get(idx);
RightWeight -=valueWeight.get(idx);
pLeft = LeftWeight / totalWeight;
variance = LeftSquares / LeftWeight - Math.pow((LeftValues / LeftWeight),2);
if (LeftWeight > epsilon && variance > 0.0)
estimate = pLeft * variance;
else
estimate = 0.0;
variance = RightSquares / RightWeight - Math.pow((RightValues / RightWeight),2);
                if (RightWeight > epsilon && variance > 0.0)
estimate += (1.0 - pLeft) * variance;
if (estimate < bestEstimate)
bestEstimate = estimate;
}
DiscEstimation.set(i, - bestEstimate);
}
//continuous values
double dVal;
        ArrayList<SortRec> sortedAttr = initArrSortRec(trainSize); //pre-filled with empty records
for (i = contAttrFrom ; i < contAttrTo ; i++){
RightWeight = RightSquares = RightValues = 0.0;
OKvalues = 0;
for (j = 0 ; j < trainSize ; j++){
if (NumValues.get(j).get(i)==null)
continue;
sortedAttr.get(OKvalues).setKey(NumValues.get(j).get(i));
sortedAttr.get(OKvalues).setValue(j);
RightWeight += weight.get(j);
dVal = weight.get(j) * NumValues.get(j).get(0);
RightValues += dVal;
dVal *= NumValues.get(j).get(0);
RightSquares += dVal;
OKvalues++;
}
totalWeight = RightWeight;
Collections.sort(sortedAttr);
bestEstimate = Double.MAX_VALUE;
LeftWeight = LeftSquares = LeftValues = 0.0;
j = 0;
while (j < OKvalues){
//collect cases with the same value of the attribute - we cannot split between them
do{
idx = sortedAttr.get(j).getValue();
dVal = weight.get(idx) * NumValues.get(idx).get(0);
LeftValues += dVal;
RightValues -= dVal;
dVal *= NumValues.get(idx).get(0);
LeftSquares += dVal;
RightSquares -= dVal;
LeftWeight += weight.get(idx);
RightWeight -= weight.get(idx);
j++;
}while (j < OKvalues && sortedAttr.get(j).getKey() == sortedAttr.get(j-1).getKey());
if (j == OKvalues)
break;
pLeft = LeftWeight / totalWeight;
variance = LeftSquares / LeftWeight - Math.pow(LeftValues / LeftWeight,2);
if (LeftWeight > epsilon && variance > 0.0)
estimate = pLeft * variance;
else
estimate = 0.0;
variance = RightSquares / RightWeight - Math.pow(RightValues / RightWeight,2);
if (RightWeight > epsilon && variance > 0.0)
estimate += (1.0 - pLeft) * variance;
if (estimate < bestEstimate){
bestEstimate = estimate;
splitPoint.set(i, (sortedAttr.get(j).getKey() + sortedAttr.get(j-1).getKey()) / 2.0);
}
}
NumEstimation.set(i, - bestEstimate);
}
}
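    /*
     * Worked example of the split score computed above (numbers illustrative,
     * not from the original source): class values {1,1,2,8,9}, split between
     * 2 and 8:
     *   left  {1,1,2}: weight 3, variance 6/3 - (4/3)^2 = 0.2222
     *   right {8,9}  : weight 2, variance 145/2 - 8.5^2 = 0.25
     *   estimate = (3/5)*0.2222 + (2/5)*0.25 = 0.2333
     * The score stored for the attribute is -bestEstimate, so a split with a
     * smaller weighted within-branch variance yields a higher (less negative)
     * score.
     */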
public ArrayList<Double> mseNumericAttr(int contAttrFrom, int contAttrTo, Instances data){
int i, j, idx, OKvalues;
double totalWeight, bestEstimate, estimate, pLeft, variance, LeftValues, LeftSquares, LeftWeight, RightValues, RightSquares, RightWeight;
int trainSize=data.numInstances();
weight=calculateWeights(data);
NumEstimation=new ArrayList<>(Collections.nCopies(contAttrTo-contAttrFrom, 0.0));
splitPoint=new ArrayList<>(Collections.nCopies(data.numAttributes()-1, 0.0));
//continuous values
double dVal;
        ArrayList<SortRec> sortedAttr = initArrSortRec(trainSize);
for (i = contAttrFrom ; i < contAttrTo ; i++){
RightWeight = RightSquares = RightValues = 0.0;
OKvalues = 0;
for (j = 0 ; j < trainSize ; j++){
if (Double.isNaN(data.instance(j).value(i)))
continue;
sortedAttr.get(OKvalues).setKey(data.instance(j).value(i));
sortedAttr.get(OKvalues).setValue(j);
RightWeight += data.instance(j).weight();
dVal = data.instance(j).weight() * data.instance(j).classValue();
RightValues += dVal;
dVal *= data.instance(j).classValue();
RightSquares += dVal;
OKvalues++;
}
totalWeight = RightWeight;
Collections.sort(sortedAttr);
bestEstimate = Double.MAX_VALUE;
LeftWeight = LeftSquares = LeftValues = 0.0;
j = 0;
while (j < OKvalues){
//collect cases with the same value of the attribute - we cannot split between them
do{
idx = sortedAttr.get(j).getValue();
dVal = data.instance(idx).weight() * data.instance(idx).classValue();
LeftValues += dVal;
RightValues -= dVal;
dVal *= data.instance(idx).classValue();
LeftSquares += dVal;
RightSquares -= dVal;
LeftWeight += data.instance(idx).weight();
RightWeight -= data.instance(idx).weight();
j++;
} while (j < OKvalues && sortedAttr.get(j).getKey() == sortedAttr.get(j-1).getKey());
if (j == OKvalues)
break;
pLeft = LeftWeight / totalWeight;
variance = LeftSquares / LeftWeight - Math.pow(LeftValues / LeftWeight,2);
if (LeftWeight > epsilon && variance > 0.0)
estimate = pLeft * variance;
else
estimate = 0.0;
variance = RightSquares / RightWeight - Math.pow(RightValues / RightWeight,2);
if (RightWeight > epsilon && variance > 0.0)
estimate += (1.0 - pLeft) * variance;
if (estimate < bestEstimate){
bestEstimate = estimate;
splitPoint.set(i, (sortedAttr.get(j).getKey() + sortedAttr.get(j-1).getKey()) / 2.0);
}
}
NumEstimation.set(i, - bestEstimate);
}
return NumEstimation;
}
public ArrayList<Double> mseDiscreteAttr(int discAttrFrom, int discAttrTo, Instances data){
int i, j, idx, OKvalues;
double totalWeight, bestEstimate, estimate, pLeft, variance, LeftValues, LeftSquares, LeftWeight, RightValues, RightSquares, RightWeight, value;
int trainSize=data.numInstances();
weight=calculateWeights(data);
DiscEstimation=new ArrayList<>(Collections.nCopies(data.numAttributes()-1, 0.0));
ArrayList<Double> valueClass = new ArrayList<>();
ArrayList<Double> valueWeight = new ArrayList<>();
ArrayList<Double> squaredValues = new ArrayList<>();
ArrayList<SortRec> sortedMean = new ArrayList<>();
discNoValues=new ArrayList<>(Collections.nCopies(data.numAttributes()-1, 0));
        //estimationReg of discrete attributes
for (i = discAttrFrom ; i < discAttrTo ; i++){
discNoValues.set(i, data.numDistinctValues(i));
valueClass=new ArrayList<>(Collections.nCopies(data.numAttributes()-1, 0.0));
valueWeight=new ArrayList<>(Collections.nCopies(data.numAttributes()-1, 0.0));
squaredValues=new ArrayList<>(Collections.nCopies(data.numAttributes()-1, 0.0));
for (j = 0 ; j < trainSize ; j++){
idx = (int) data.instance(j).value(i);
value = data.instance(j).classValue();
valueClass.set(idx, valueClass.get(idx)+ weight.get(j) * value);
valueWeight.set(idx, valueWeight.get(idx) + weight.get(j));
squaredValues.set(idx, squaredValues.get(idx)+weight.get(j)*value*value);
}
//define size of sortedMean
            sortedMean=initArrSortRec(discNoValues.get(i));
RightWeight = RightSquares = RightValues = 0.0;
OKvalues = 0;
for (j = 0 ; j < discNoValues.get(i); j++){
if (valueWeight.get(j) > epsilon){
sortedMean.get(OKvalues).setKey(valueClass.get(j) / valueWeight.get(j));
sortedMean.get(OKvalues).setValue(j);
OKvalues++;
RightWeight +=valueWeight.get(j);
RightSquares += squaredValues.get(j);
RightValues +=valueClass.get(j);
}
}
totalWeight = RightWeight;
Collections.sort(sortedMean);
bestEstimate = Double.MAX_VALUE;
LeftWeight = LeftSquares = LeftValues = 0.0;
int upper = OKvalues - 1;
for (j = 0 ; j < upper ; j++){
idx = sortedMean.get(j).getValue();
LeftSquares += squaredValues.get(idx);
LeftValues +=valueClass.get(idx);
LeftWeight +=valueWeight.get(idx);
RightSquares -=squaredValues.get(idx);
RightValues -=valueClass.get(idx);
RightWeight -=valueWeight.get(idx);
pLeft = LeftWeight / totalWeight;
variance = LeftSquares / LeftWeight - Math.pow((LeftValues / LeftWeight),2);
if (LeftWeight > epsilon && variance > 0.0)
estimate = pLeft * variance;
else
estimate = 0.0;
variance = RightSquares / RightWeight - Math.pow((RightValues / RightWeight),2);
                if (RightWeight > epsilon && variance > 0.0)
estimate += (1.0 - pLeft) * variance;
if (estimate < bestEstimate)
bestEstimate = estimate;
}
DiscEstimation.set(i, - bestEstimate);
}
return DiscEstimation;
}
public double mseDiscreteAttr(int attrIdx, Instances data){
int j, idx, OKvalues;
double totalWeight, bestEstimate, estimate, pLeft, variance, LeftValues, LeftSquares, LeftWeight, RightValues, RightSquares, RightWeight, value;
int trainSize=data.numInstances();
weight=calculateWeights(data);
ArrayList<Double> valueClass = new ArrayList<>();
ArrayList<Double> valueWeight = new ArrayList<>();
ArrayList<Double> squaredValues = new ArrayList<>();
ArrayList<SortRec> sortedMean = new ArrayList<>();
        discNoValues=new ArrayList<>(Collections.nCopies(data.numAttributes()-1, 0)); //without the class attribute
        //estimationReg of a discrete attribute
discNoValues.set(attrIdx, data.numDistinctValues(attrIdx));
valueClass=new ArrayList<>(Collections.nCopies(data.numDistinctValues(attrIdx), 0.0));
valueWeight=new ArrayList<>(Collections.nCopies(data.numDistinctValues(attrIdx), 0.0));
squaredValues=new ArrayList<>(Collections.nCopies(data.numDistinctValues(attrIdx), 0.0));
for (j = 0 ; j < trainSize ; j++){
idx = (int) data.instance(j).value(attrIdx);
value = data.instance(j).classValue();
valueClass.set(idx, valueClass.get(idx)+ weight.get(j) * value);
valueWeight.set(idx, valueWeight.get(idx) + weight.get(j));
squaredValues.set(idx, squaredValues.get(idx)+weight.get(j)*value*value);
}
        sortedMean=initArrSortRec(discNoValues.get(attrIdx));
RightWeight = RightSquares = RightValues = 0.0;
OKvalues = 0;
for (j = 0 ; j < discNoValues.get(attrIdx); j++){
if (valueWeight.get(j) > epsilon){
sortedMean.get(OKvalues).setKey(valueClass.get(j) / valueWeight.get(j));
sortedMean.get(OKvalues).setValue(j);
OKvalues++;
RightWeight +=valueWeight.get(j);
RightSquares += squaredValues.get(j);
RightValues +=valueClass.get(j);
}
}
totalWeight = RightWeight;
Collections.sort(sortedMean);
bestEstimate = Double.MAX_VALUE;
LeftWeight = LeftSquares = LeftValues = 0.0;
int upper = OKvalues - 1;
for (j = 0 ; j < upper ; j++){
idx = sortedMean.get(j).getValue();
LeftSquares += squaredValues.get(idx);
LeftValues +=valueClass.get(idx);
LeftWeight +=valueWeight.get(idx);
RightSquares -=squaredValues.get(idx);
RightValues -=valueClass.get(idx);
RightWeight -=valueWeight.get(idx);
pLeft = LeftWeight / totalWeight;
variance = LeftSquares / LeftWeight - Math.pow((LeftValues / LeftWeight),2);
if (LeftWeight > epsilon && variance > 0.0)
estimate = pLeft * variance;
else
estimate = 0.0;
variance = RightSquares / RightWeight - Math.pow((RightValues / RightWeight),2);
            if (RightWeight > epsilon && variance > 0.0)
estimate += (1.0 - pLeft) * variance;
if (estimate < bestEstimate)
bestEstimate = estimate;
}
return - bestEstimate;
}
public double mseNumericAttr(int attrIdx, Instances data){
int j, idx, OKvalues;
double totalWeight, bestEstimate, estimate, pLeft, variance, LeftValues, LeftSquares, LeftWeight, RightValues, RightSquares, RightWeight;
int trainSize=data.numInstances();
double splitPoint=Double.MAX_VALUE;
//continuous values
double dVal;
int numOfMissing=data.attributeStats(attrIdx).missingCount;
        ArrayList<SortRec> sortedAttr = initArrSortRec(trainSize-numOfMissing);
RightWeight = RightSquares = RightValues = 0.0;
OKvalues = 0;
for (j = 0 ; j < trainSize ; j++){
if(Double.isNaN(data.instance(j).value(attrIdx)))
continue;
sortedAttr.get(OKvalues).setKey(data.instance(j).value(attrIdx));
sortedAttr.get(OKvalues).setValue(j);
RightWeight += data.instance(j).weight();
dVal = data.instance(j).weight() * data.instance(j).classValue();
RightValues += dVal;
dVal *= data.instance(j).classValue();
RightSquares += dVal;
OKvalues++;
}
totalWeight = RightWeight;
Collections.sort(sortedAttr);
bestEstimate = Double.MAX_VALUE;
LeftWeight = LeftSquares = LeftValues = 0.0;
j = 0;
while (j < OKvalues){
//collect cases with the same value of the attribute - we cannot split between them
do{
idx = sortedAttr.get(j).getValue();
dVal = data.instance(idx).weight() * data.instance(idx).classValue();
LeftValues += dVal;
RightValues -= dVal;
dVal *= data.instance(idx).classValue();
LeftSquares += dVal;
RightSquares -= dVal;
LeftWeight += data.instance(idx).weight();
RightWeight -= data.instance(idx).weight();
j++;
}while (j < OKvalues && sortedAttr.get(j).getKey() == sortedAttr.get(j-1).getKey());
if (j == OKvalues)
break;
pLeft = LeftWeight / totalWeight;
variance = LeftSquares / LeftWeight - Math.pow(LeftValues / LeftWeight,2);
if (LeftWeight > epsilon && variance > 0.0)
estimate = pLeft * variance;
else
estimate = 0.0;
variance = RightSquares / RightWeight - Math.pow(RightValues / RightWeight,2);
if (RightWeight > epsilon && variance > 0.0)
estimate += (1.0 - pLeft) * variance;
if (estimate < bestEstimate){
bestEstimate = estimate;
splitPoint=(sortedAttr.get(j).getKey() + sortedAttr.get(j-1).getKey()) / 2.0;
}
}
return - bestEstimate;
}
public static ArrayList<Double> calculateWeights(Instances data){
ArrayList<Double> weights = new ArrayList<Double>();
for(int i=0;i<data.numInstances();i++)
weights.add(data.instance(i).weight());
return weights;
}
public static ArrayList<SortRec> initArrSortRec(int size){
ArrayList<SortRec> tmp=new ArrayList<>(size);
for(int i=0;i<size;i++)
tmp.add(i, new SortRec());
return tmp;
}
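    /*
     * Usage sketch (assumes a loaded regression dataset; the file name and
     * variable names are illustrative, not part of this class):
     *
     *   Instances data = new Instances(new java.io.FileReader("housing.arff"));
     *   data.setClassIndex(data.numAttributes() - 1);
     *   RobniksMSE mse = new RobniksMSE();
     *   double score = mse.mseNumericAttr(0, data); //higher (less negative) = better split on attribute 0
     */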
} | 20,427 | 43.025862 | 145 | java |
featConstr | featConstr-master/src/featconstr/SortRec.java | package featconstr;
/**
*
* @author bostjan
*/
public class SortRec implements Comparable<SortRec>{
private int value;
private double key;
public SortRec(){
this.value=0;
this.key=0.0;
}
public SortRec(int value, double key){
this.value = value;
this.key = key;
}
public SortRec(int value){
this.value = value;
}
public SortRec(double key){
this.key = key;
}
public double getKey(){
return this.key;
}
public void setKey(double key){
this.key=key;
}
public void setValue(int value){
this.value=value;
}
public int getValue(){
return this.value;
}
@Override
public int compareTo(SortRec rec) {
        return Double.compare(key, rec.key);
}
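    //usage sketch (illustrative): Collections.sort on a List<SortRec> orders the
    //records ascending by key, e.g. keys {0.7, 0.2, 0.5} come out as
    //{0.2, 0.5, 0.7}, each record carrying its original index along in value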
}
| 813 | 14.653846 | 52 | java |
featConstr | featConstr-master/src/featconstr/Timer.java | package featconstr;
import java.util.concurrent.TimeUnit;
/**
*
* @author bostjan
*/
public class Timer{
long startTime, stopTime;
boolean running;
public Timer(){
//startTime = System.currentTimeMillis();
startTime = System.nanoTime();
running = true;
}
public void start(){
//startTime = System.currentTimeMillis();
startTime = System.nanoTime();
running = true;
}
public void stop(){
//stopTime = System.currentTimeMillis();
stopTime = System.nanoTime();
running = false;
}
    public long diff(){
        if (running)
            //return System.currentTimeMillis()-startTime;
            return TimeUnit.NANOSECONDS.toMillis(System.nanoTime()-startTime); //milliseconds in both states, as expected by toString()
        else
            return TimeUnit.NANOSECONDS.toMillis(stopTime-startTime);
    }
    public String toString(){ //formats the elapsed milliseconds as a human-readable string
long diff = diff();
long millis = diff%1000;
long secs = (diff/1000)%60;
long mins = (diff/(1000*60))%60;
long hs = (diff/(1000*3600))%24;
long days = diff/(1000*3600*24);
if (days > 0)
return days+"d "+hs+"h "+mins+"m "+secs+"s "+millis+"ms";
if (hs > 0)
return hs+"h "+mins+"m "+secs+"s "+millis+"ms";
if (mins > 0)
return mins+"m "+secs+"s "+millis+"ms";
if (secs > 0)
return secs+"s "+millis+"ms";
return millis+"ms";
}
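    //illustrative: a diff() of 93784005 ms renders as "1d 2h 3m 4s 5ms"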
}
| 1,332 | 19.828125 | 67 | java |
featConstr | featConstr-master/src/featconstr/Visualize.java | package featconstr;
import org.sourceforge.jlibeps.epsgraphics.*;
import java.awt.*;
import java.io.*;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.text.DecimalFormat;
import weka.core.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
public class Visualize{
final static float DASH[] = {10.0f};
final static BasicStroke DASHED = new BasicStroke(1.0f,BasicStroke.CAP_BUTT,BasicStroke.JOIN_MITER,10.0f, DASH, 0.0f);
final static BasicStroke ROUNDED = new BasicStroke(1.0f,BasicStroke.CAP_ROUND,BasicStroke.JOIN_MITER,4.0f);
final static BasicStroke THICK = new BasicStroke(1.0f);
final static BasicStroke NORMAL = new BasicStroke(1.0f);
final static BasicStroke THIN = new BasicStroke(0.5f);
final static BasicStroke BOLD = new BasicStroke(2.0f);
static int VIS_SIZE = 500;
static int numDec=3; //number of decimal places in attribute importance image
//credits to Erik Štrumbelj
public static void modelVisualToFileAttrImptLine(String file, String modelName, String datasetName, Instances data, ArrayList<Double>[] dotsA, ArrayList<Double> dotsB[], boolean classification, int resolution, int classValueToExplain, String format, boolean fc){
int VIS_SIZE2 = 400;
int xBB = 595; //width of bounding box (A4)
int yBB = 842; //height of bounding box (A4)
String fontName = Font.SANS_SERIF;
String tmpVal;
Font myFont14 = new Font(fontName, Font.BOLD, 14);
Font myFont10 = new Font(fontName, Font.BOLD, 10);
Font myFont8 = new Font(fontName, Font.BOLD, 8);
Font myFont5 = new Font(fontName, Font.BOLD, 5);
// start drawing the picture
FileOutputStream finalImage;
EpsGraphics2D g;
try{
int coordX = 45; //left/right margin
int coordXlength = VIS_SIZE2 - 2 * coordX;
int coordY = 150;
int coordYlength = 100;
int sign_size = 2; //dot size when drawing contributions and stdev of contributions
            int yImg = (data.numAttributes()-1)*(coordYlength+20)+34; //height of bounding box (A4) ... data.numAttributes()-1 ... we draw just the attributes, without the class
finalImage = new FileOutputStream(file);
if(format.equals("A4"))
g = new EpsGraphics2D("Title", finalImage, 0, 0, xBB, yBB);
else
g = new EpsGraphics2D("Title", finalImage, 0, 0, VIS_SIZE2+20, data.numAttributes()*(coordYlength+20));
//center picture to bounding box
int xT=xBB/2-(VIS_SIZE2+20)/2;
int yT=yBB/2-yImg/2;
g.translate(xT,yT); //because of later transformation to pdf and png - to be in the center of the page
g.setFont(myFont14);
g.setColor(Color.BLACK);
//outer graph visualization
//dataset & model print
int width1,width2;
g.drawString("Dataset: "+datasetName,10,10);
g.drawString("Model: "+modelName,10,25);
width1 =g.getFontMetrics().stringWidth(("Model: "+modelName));
if(!classification){
width1=g.getFontMetrics().stringWidth(("Dataset: "+datasetName));
g.drawString("Resolution: " + resolution,10+width1+5,25);
}
else{
if(data.attribute(1).isNumeric()){
width2=g.getFontMetrics().stringWidth(("Explaining class: "+(new Instances(data,1,1)).instance(0).classAttribute().value(classValueToExplain)));
g.drawString("Resolution: " + resolution,10+width1+width2+5,25);
}
g.drawString("Explaining class: " + (new Instances(data,1,1)).instance(0).classAttribute().value(classValueToExplain),10+width1+5,25);
}
double max_val = -Double.MAX_VALUE;
double min_val = Double.MAX_VALUE;
ArrayList<Double> temp, temp2;
double d;
for(int i = 0; i < dotsA.length; i++){
temp = dotsA[i];
temp2 = dotsB[i]; //values that represent informativeness of attributes
for(int j = 0; j < temp.size() / 2; j++){
d = (temp.get(j*2+1));
if(d > max_val)
max_val = d;
if(d < min_val)
min_val = d;
d = (temp2.get(0));
if(d > max_val)
max_val = d;
if(d < min_val)
min_val = d;
}
}
max_val+=max_val*5/100.0; //we increase the maximum value of the x axis for better visibility
min_val+=min_val*5/100.0; //we increase the min value of the x axis in the negative direction - we increase the range
double maxX, minX, chunkSize, axisOffSet, y, x, x2;
int ylabels;
for(int i = 0; i < data.numAttributes() -1; i++){
maxX = Double.MIN_VALUE;
minX = Double.MAX_VALUE;
if(fc)
g.setFont(myFont8);
else
g.setFont(myFont10);
g.setColor(Color.GRAY);
g.drawString(data.attribute(i).name(),coordX + 2,(i-1)*(coordYlength+20) + coordY+20 - 5);
temp = dotsA[i];
temp2 = dotsB[i];
if(data.attribute(i).isNominal()){
maxX = data.attribute(i).numValues() - 1;
minX = 0;
}
else
for(int j = 0; j < temp.size() / 2; j++){
d = (temp.get(j*2));
if(d > maxX)
maxX = d;
if(d < minX)
minX = d;
}
//attribute text
g.setColor(Color.GRAY);
g.setFont(myFont10);
chunkSize = coordYlength/(max_val - min_val);
//base lines
g.setStroke(NORMAL);
g.drawLine(coordX,coordY+i*(coordYlength+20),coordX + coordXlength,coordY+i*(coordYlength+20)); //bottom line
g.drawLine(coordX,coordY+i*(coordYlength+20),coordX ,coordY - coordYlength+i*(coordYlength+20)); //left line of the rectangle
g.drawLine(coordX + coordXlength,coordY - coordYlength+i*(coordYlength+20),coordX + coordXlength,coordY+i*(coordYlength+20)); //right line of the rectangle
g.drawLine(coordX + coordXlength,coordY - coordYlength+i*(coordYlength+20),coordX ,coordY - coordYlength+i*(coordYlength+20)); //top line
//zero axis
axisOffSet = 0;
if(min_val < 0)
axisOffSet = min_val*chunkSize;
g.setStroke(DASHED);
g.drawLine(coordX,coordY+i*(coordYlength+20)+(int)axisOffSet,coordX+coordXlength,coordY+i*(coordYlength+20)+(int)axisOffSet);
//zero axis start and end numbers
if(data.attribute(i).isNominal()){
g.setFont(myFont5);
for(int j = 0; j < data.attribute(i).numValues();j++){
if(data.attribute(i).value(j).contains("-inf") || data.attribute(i).value(j).contains("All") || data.attribute(i).value(j).contains("_x_")){
tmpVal = data.attribute(i).value(j).replace("\\", "").replace("'","");
g.drawString(tmpVal,(int)(((j - minX) / (maxX - minX)) * +coordXlength) + coordX, coordY+i*(coordYlength+20)+(int)axisOffSet +10);
}
else
g.drawString(data.attribute(i).value(j),(int)(((j - minX) / (maxX - minX)) * +coordXlength) + coordX, coordY+i*(coordYlength+20)+(int)axisOffSet +10);
}
}
else{
g.setFont(myFont5);
g.drawString(roundDecimal2(minX),coordX -10,coordY+i*(coordYlength+20)+(int)axisOffSet +10);
g.drawString(roundDecimal2(maxX),coordX+coordXlength - 20,coordY+i*(coordYlength+20)+(int)axisOffSet +10);
}
//horizontal and vertical limiters
g.setFont(myFont8);
ylabels = 4;
for(int j = 0; j < ylabels+1; j++){
//labels on the x axis
g.drawLine(coordX+(int)(j*((double)coordXlength/ylabels)), coordY+i*(coordYlength+20)+(int)axisOffSet-5, coordX+(int)(j*((double)coordXlength/ylabels)), coordY+i*(coordYlength+20)+(int)axisOffSet+5);
//labels and values on the y axis
g.drawLine(coordX + coordXlength-5, coordY+i*(coordYlength+20)-(int)(j*((max_val - min_val)/ylabels)*chunkSize), coordX + coordXlength+5, coordY+i*(coordYlength+20)-(int)(j*((max_val - min_val)/ylabels)*chunkSize));
g.drawString(roundDecimal2(j*((max_val - min_val)/ylabels)+min_val), coordX + coordXlength+15, coordY + 3 + i*(coordYlength+20)-(int)(j*((max_val - min_val)/ylabels)*chunkSize));
}
for(int j = 0; j < temp.size() / 2; j++){
x = (temp.get(j*2));
y = (temp.get(j*2+1));
if(!data.attribute(i).isNominal()){
g.setColor(Color.BLACK);
g.fillOval((int)(((x - minX) / (maxX - minX)) * +coordXlength - (sign_size / 2.0)) + coordX, (int)(((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY, sign_size, sign_size);
}
else{
sign_size+=2;
g.setStroke(BOLD);
g.setColor(Color.BLACK);
g.fillOval((int)(((x - minX) / (maxX - minX)) * +coordXlength - (sign_size / 2.0)) + coordX, (int)(((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY, sign_size, sign_size);
sign_size-=2;
}
}
//draw attribute importance
x2=(temp.get(temp.size()-2));
y =(temp2.get(0));
g.setColor(Color.getHSBColor(121, 83, 54));
g.setStroke(NORMAL);
g.setFont(myFont8);
if(fc)
width1=g.getFontMetrics().stringWidth("Feat. importance: "+roundDecimal3((double)dotsB[i].get(0)));
else
width1=g.getFontMetrics().stringWidth("Attr. importance: "+roundDecimal3((double)dotsB[i].get(0)));
if(data.attribute(i).isNominal()){
g.drawLine(coordX, (int)(((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY,
(int)((((data.attribute(i).numValues()-1) - minX) / (maxX - minX)) * +coordXlength) + coordX,
(int)(((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY);
if(fc)
g.drawString("Feat. importance: "+roundDecimal3((double)dotsB[i].get(0)),
coordX+(int)(((double)coordXlength/2)-width1/2),
(int)((((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY)+15);
else
g.drawString("Attr. importance: "+roundDecimal3((double)dotsB[i].get(0)),
coordX+(int)(((double)coordXlength/2)-width1/2),
(int)((((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY)+15);
}
else{
g.drawLine(coordX, (int)(((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY,
(int)(((x2 - minX) / (maxX - minX)) * +coordXlength) + coordX,
(int)(((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY);
if(fc)
g.drawString("Feat. importance: "+roundDecimal3((double)dotsB[i].get(0)),
coordX+(int)(((double)coordXlength/2)-width1/2),
(int)((((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY)+15);
else
g.drawString("Attr. importance: "+roundDecimal3((double)dotsB[i].get(0)),
coordX+(int)(((double)coordXlength/2)-width1/2),
(int)((((y - min_val) / (max_val - min_val)) * -coordYlength - sign_size / 2.0) + (i)*(coordYlength+20) + coordY)+15);
}
//horizontal line
g.setColor(Color.GRAY);
}
g.flush();
g.close();
finalImage.close();
}
catch(IOException e){
System.err.println("ERROR: "+e.toString());
}
}
public static void attrImportanceVisualizationSorted(String file, String modelName, String datasetName, Instances data, int drawLimit, ArrayList<Double> dotsB[], boolean classification, int resolution,String format, boolean fc){
int xBB = 595; //width of bounding box (A4)
int yBB = 842; //height of bounding box (A4)
int wBox=530; //width of the box for drawing ... VIS_SIZE is currently 500
int fontSize2 = 9;
int fontSize = 14;
        int leadinY = 80; //vertical offset from the top edge to the column-header row (Feature/Importance/Value)
int minY = leadinY;
        int leadoutY = 70; //distance between the axis labels (the numbers on the axis) and the bottom edge
double perFeature = 30;
double ratio = 0.6;
int maxX = +150;
int minX = 0;
int textLeft = -10; //-10 because we then add +20
String fontName = Font.SANS_SERIF;
Font myFont = new Font(fontName, Font.BOLD, fontSize);
double threshold=0.03;
Map<Double, String> treemap = new TreeMap<>();
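        //note (added for clarity): the map is keyed by importance value, so two
        //attributes with exactly the same importance would overwrite one another here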
for(int i=0;i<dotsB.length;i++){
treemap.put((double)dotsB[i].get(0), data.attribute(i).name());
}
//sort (descending) map based on attr. importance
Map<Double, String> newMap = new TreeMap<>(Collections.reverseOrder());
newMap.putAll(treemap);
if(newMap.size()>drawLimit){
System.out.println("Drawing limit for attribute name: "+newMap.get(newMap.keySet().toArray()[drawLimit-1])); //attribute name
System.out.printf("Drawing limit for value: %.4f\n",newMap.keySet().toArray()[drawLimit-1]); //attribute value
threshold=Double.parseDouble(String.valueOf(newMap.keySet().toArray()[drawLimit-1]));
            newMap.keySet().removeAll(Arrays.asList(newMap.keySet().toArray()).subList(drawLimit, newMap.size())); //subList(fromIndex - inclusive, toIndex - exclusive)
}
else
threshold=Double.parseDouble(String.valueOf(newMap.keySet().toArray()[newMap.size()-1]));
int relevantFeatures = 0;
double maxContrib = 0;
for(int i = 0; i < dotsB.length; i++){
if(Math.abs((double)dotsB[i].get(0)) >= threshold)
relevantFeatures++;
if(Math.abs((double)dotsB[i].get(0)) >= maxContrib)
maxContrib = Math.abs((double)dotsB[i].get(0));
}
int TOTAL_Y = (int)(leadinY + perFeature * relevantFeatures + leadoutY);
double MAX_Y = leadinY + perFeature * relevantFeatures; //vertical line between positive and negative part
FileOutputStream finalImage;
EpsGraphics2D g;
try{
finalImage= new FileOutputStream(file);
            g = new EpsGraphics2D("Title", finalImage, 0,0, xBB, yBB); //A4 because we may later convert eps to pdf and png (parameters are set to center the image on A4)
//center picture to bounding box
int xT=xBB/2-wBox/2;
int yT=yBB/2-TOTAL_Y/2;
g.translate(xT,yT); //because of later transformation to pdf and png - to be in the center of the page
g.setFont(myFont);
g.setStroke(THICK);
g.setColor(Color.BLACK);
g.drawLine(1,1,1,TOTAL_Y);
g.drawLine(wBox,1,wBox,TOTAL_Y);
g.drawLine(1,TOTAL_Y,wBox,TOTAL_Y);
g.drawLine(1,1,wBox,1);
g.setColor(Color.BLACK);
g.drawRect(380,3,148,22);
g.drawString("Data: " + datasetName,10,20);
g.drawString("Model: " + modelName,10,40);
if(fc){
g.drawString("Feature importance",385,20);
g.drawString("Feature",15,getY(minY-10));
}
else{
g.drawString("Attribute importance",385,20);
g.drawString("Attribute",15,getY(minY-10));
}
g.drawString("Value",getX(maxX + 35) + 30,getY(minY-10));
g.drawString("Importance",getX(0-42),getY(minY-10));
g.setStroke(ROUNDED);
int counter = 0;
BigDecimal bd;
Font tempFontA, tempFontV;
String attrName, padded, attVal;
double value, yText, y, y2, barH, barTop, x2;
for(Entry<Double, String> entry : newMap.entrySet()){
value = entry.getKey();
attrName = entry.getValue();
if(value >= threshold){
//text for feature
int textSize = fontSize2;
tempFontA = new Font(Font.MONOSPACED, Font.BOLD, textSize-3);
tempFontV = new Font(Font.MONOSPACED, Font.BOLD, textSize);
bd = new BigDecimal(value).setScale(numDec, RoundingMode.HALF_UP);
padded = String.format("%-5s", bd.doubleValue()).replace(' ', '0'); //rpad
attVal = padded;
yText = perFeature*(counter) + minY;
g.setColor(Color.BLACK);
g.setFont(tempFontV);
if(fc)
g.drawString("Feat " +(counter+1)+ " ", textLeft+20,getY(yText+fontSize+3));
else
g.drawString(attrName + " ", textLeft+20,getY(yText+fontSize+3));
g.setFont(tempFontA);
if(fc){
g.setFont(tempFontA);
g.drawString(attrName + " ", textLeft+40,getY(yText+fontSize+17));
}
g.setFont(tempFontV);
g.drawString(" " + formatValue(attVal,13,numDec), getX(maxX + 5) + 30,getY(yText+fontSize+3));
//bar for feature
y = perFeature*(counter) + minY;
y2 = perFeature*(counter+1) + minY;
g.setStroke(DASHED);
g.setColor(Color.GRAY);
if(!fc)
g.drawLine(VIS_SIZE/4,getY(y2),getX(maxX),getY(y2));
g.setStroke(NORMAL);
barH = (int)(perFeature * ratio);
barTop = y + (perFeature- barH) / 2;
x2 = Math.abs((value/maxContrib) * (getX(maxX)-VIS_SIZE/4));
g.fillRect(VIS_SIZE/4,getY(barTop),(int)Math.ceil(x2),(int)barH);
g.setColor(Color.BLACK);
g.drawRect(VIS_SIZE/4,getY(barTop),(int)Math.ceil(x2),(int)barH);
}
if(Math.abs(value) >= threshold)
counter++;
}
y = perFeature*(0) + minY;
g.setStroke(DASHED);
if(!fc)
g.drawLine(VIS_SIZE/4,getY(y),getX(maxX),getY(y)); //first dashed line
//axis & scale
g.setStroke(NORMAL);
Font tempFont2 = new Font(Font.MONOSPACED, Font.BOLD, fontSize2-1);
g.setFont(tempFont2);
g.drawLine(VIS_SIZE/4,getY(MAX_Y + 20),getX(maxX),getY(MAX_Y + 20)); //bottom black line ... the axis where we place values
String[] tick = new String[3];
tick[0] = ""+minX;
tick[1] = roundDecimal2(maxContrib/2);
tick[2] = roundDecimal2(maxContrib);
//drawing labels on the X axis
//first vertical line 0
int width =g.getFontMetrics().stringWidth((tick[0]));
g.drawLine(VIS_SIZE/4,getY(MAX_Y + 24),VIS_SIZE/4,getY(MAX_Y + 20)); //vertical lines on the axis
g.drawString(tick[0],VIS_SIZE/4-width/2,getY(MAX_Y + 24+20) );
//the second half of the vertical line
width=g.getFontMetrics().stringWidth((tick[1]));
g.drawLine(VIS_SIZE/4+(getX(maxX)-VIS_SIZE/4)/2,getY(MAX_Y + 24),VIS_SIZE/4+(getX(maxX)-VIS_SIZE/4)/2,getY(MAX_Y + 20)); //vertical lines on the axis
g.drawString(tick[1],VIS_SIZE/4+(getX(maxX)-VIS_SIZE/4)/2-width/2,getY(MAX_Y + 24+20) );
//third vertical line maximum
width=g.getFontMetrics().stringWidth((tick[2]));
g.drawLine(getX(maxX),getY(MAX_Y + 24),getX(maxX),getY(MAX_Y + 20)); //vertical lines on the axis
g.drawString(tick[2],getX(maxX)-width/2,getY(MAX_Y + 24+20) ); //-width/2 to center number to axis
g.flush();
g.close();
finalImage.close();
}
catch (IOException e){
System.out.println("ERROR: "+e.toString());
}
}
//credits to Erik Štrumbelj
public static void instanceVisualizationToFile(String file, String modelName, String datasetName, Instances instance, int id, int topHigh, double[] contributions, double prediction, int classValueToExplain, boolean isClassification, boolean fc){
int xBB = 595; //width of bounding box (A4)
int yBB = 842; //height of bounding box (A4)
int wBox=530; //width of the box for drawing
int fontSize2 = 14;
int fontSize = 14;
        int leadinY = 170; //vertical offset from the top edge to the column-header row (Feature/Value/Contribution)
        int minY = leadinY;
        int leadoutY = 70; //distance between the axis labels (the numbers on the axis) and the bottom edge
double perFeature = 30;
double ratio = 0.6;
int maxX = +150;
int minX = -150;
int textLeft = -10; //-10 because we then add +20
String fontName = Font.SANS_SERIF;
Font myFont = new Font(fontName, Font.BOLD, fontSize);
double threshold=-1;
//threshold calculation
double contrCp[]=contributions.clone();
for(int i = 0; i < contrCp.length; i++){
if(contrCp[i]<0)
contrCp[i]=Math.abs(contrCp[i]);
}
Arrays.sort(contrCp);
if(contrCp.length>topHigh)
threshold=contrCp[contrCp.length-topHigh];
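        //illustrative: contributions {0.5,-0.9,0.1,0.3} with topHigh=2 give sorted
        //absolute values {0.1,0.3,0.5,0.9}, so threshold=0.5 and only the two
        //largest-magnitude bars get drawn below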
int relevantFeatures = 0;
double maxContrib = 0;
for(int i = 0; i < contributions.length; i++){
if(Math.abs(contributions[i]) >= threshold)
relevantFeatures++;
if(Math.abs(contributions[i]) >= maxContrib)
maxContrib = Math.abs(contributions[i]);
}
int TOTAL_Y = (int)(leadinY + perFeature * relevantFeatures + leadoutY);
double MAX_Y = leadinY + perFeature * relevantFeatures; //vertical line between positive and negative part
FileOutputStream finalImage;
EpsGraphics2D g;
try{
finalImage = new FileOutputStream(file);
            g = new EpsGraphics2D("Title", finalImage, 0,0, xBB, yBB); //A4 because we may later convert eps to pdf and png (parameters are set to center the image on A4)
//center picture to bounding box
int xT=xBB/2-wBox/2;
int yT=yBB/2-TOTAL_Y/2;
g.translate(xT,yT); //because of later transformation to pdf and png - to be in the center of the page
g.setFont(myFont);
g.setStroke(THICK);
g.setColor(Color.BLACK);
g.drawLine(1,1,1,TOTAL_Y);
g.drawLine(wBox,1,wBox,TOTAL_Y);
g.drawLine(1,TOTAL_Y,wBox,TOTAL_Y);
g.drawLine(1,1,wBox,1);
g.setColor(Color.BLACK);
g.drawRect(380,3,148,22);
g.drawString("Data: " + datasetName,10,20);
g.drawString("Model: " + modelName,10,40);
g.drawString("Instance No.: " + id,10,60);
g.drawString("Instance Explanation",385,20);
if(isClassification){
String actValue= instance.instance(0).classAttribute().value((int)instance.instance(0).classValue()).replace(',','.');
String predStr=""+((prediction==0 || prediction==1)? (int)prediction : FeatConstr.rnd3(prediction));
g.drawString("Explaining class: " + instance.instance(0).classAttribute().value(classValueToExplain) +" Prediction: p(class = "+instance.instance(0).classAttribute().value(classValueToExplain)+"|x)= "+predStr,10,100);
g.drawString("Actual value for this instance: class = " + actValue,10,120);
}
else{
g.drawString("Prediction: p = " + roundDecimal2(prediction).replace(',','.'),10,100);
g.drawString("Actual value for this instance: " + roundDecimal2(instance.instance(0).value(contributions.length)).replace(',','.'),10,120);
}
if(fc)
g.drawString("Feature",15,getY(minY-10));
else
g.drawString("Attribute",15,getY(minY-10));
g.drawString("Value",getX(maxX + 35) + 30,getY(minY-10));
g.drawString("Contribution",getX(0-42),getY(minY-10));
double yA = perFeature*(0) + minY;
g.setStroke(DASHED);
g.setColor(Color.GRAY);
g.drawLine(getX(minX),getY(yA),getX(maxX),getY(yA)); //first dashed line
//axis & scale
g.setStroke(NORMAL);
g.drawLine(getX(0),getY(MAX_Y),getX(0),getY(minY)); //y-axis
Font tempFont2 = new Font(Font.MONOSPACED, Font.BOLD, fontSize2-1);
g.setFont(tempFont2);
g.setColor(Color.BLACK);
g.drawLine(getX(minX),getY(MAX_Y + 20),getX(maxX),getY(MAX_Y + 20)); //x-axis
String[] tick = new String[5];
tick[0] = "-"+roundDecimal2(maxContrib);
tick[1] = "-"+roundDecimal2(maxContrib/2);
tick[2] = " 0";
tick[3] = roundDecimal2(maxContrib/2);
tick[4] = roundDecimal2(maxContrib);
for(int k = 0; k < 5; k++){
g.drawLine(getX(((maxX - minX) / 4)*k-maxX),getY(MAX_Y + 24),getX(((maxX - minX) / 4)*k-maxX),getY(MAX_Y + 20));
g.drawString(tick[k],getX(((maxX - minX) / 4)*(k-0.3)-maxX+3),getY(MAX_Y + 24+20) );
}
g.setStroke(ROUNDED);
int counter = 0;
int textSize;
String attVal;
boolean writeOnce=true;
Font tempFont, tempFontA, tempFontV, tempFontZ;
double yText, y, y2, barH, barTop, x1, x2 ;
int costumOffsetX;
for(int i = 0; i < contributions.length; i++){
if(Math.abs(contributions[i]) >= threshold){
//text for feature
textSize = fontSize2;
tempFont = new Font(Font.MONOSPACED, Font.BOLD, textSize-3);
g.setFont(tempFont);
attVal = instance.instance(0).toString(i);
yText = perFeature*(counter) + minY;
g.setColor(Color.BLACK);
if(fc){
textSize=8;
tempFont = new Font(Font.MONOSPACED, Font.BOLD, textSize-3);
g.setFont(tempFont);
}
textSize = 9;
tempFontA = new Font(Font.MONOSPACED, Font.BOLD, textSize-4);
tempFontV = new Font(Font.MONOSPACED, Font.BOLD, textSize);
g.setFont(tempFontV);
if(fc)
g.drawString("Feat " +(counter+1)+ " ", textLeft+20,getY(yText+fontSize+3));
else
g.drawString(instance.attribute(i).name() + " ", textLeft+20,getY(yText+fontSize+3));
g.setFont(tempFontA);
if(fc){
g.setFont(tempFontA);
g.drawString(instance.attribute(i).name() + " ", textLeft+40,getY(yText+fontSize+15));
}
g.setFont(tempFontV);
textSize=14;
tempFont = new Font(Font.MONOSPACED, Font.BOLD, textSize-3);
if(attVal.contains("-inf") || attVal.contains("All") || attVal.contains("_x_")){ //Cartesian product value
if(writeOnce){
tempFontZ = new Font(Font.MONOSPACED, Font.BOLD, 6);
g.setFont(tempFontZ);
g.drawString("*",10,getY(MAX_Y + 60) );
g.setColor(Color.GRAY);
g.drawString("Cartesian product value",14,getY(MAX_Y + 63) );
writeOnce=false;
}
g.setFont(tempFont);
g.setColor(Color.BLACK);
attVal = attVal.replace("\\", "").replace("'","");
g.drawString("*", getX(maxX + 5) + 68,getY(yText+fontSize+4)); //value e.g. 1-2 is from Cartesian product after FC '-' is instead of 'x'
tempFontZ = new Font(Font.MONOSPACED, Font.BOLD, 4);
g.setFont(tempFontZ);
g.drawString(attVal, getX(maxX + 5)+69-(attVal.length()),getY(yText+fontSize+15)); //value e.g. 1-2 is from Cartesian product after FC '-' is instead of 'x'
}
else
g.drawString(" " + formatValue(attVal,13,numDec), getX(maxX + 5) + 30,getY(yText+fontSize+4)); //value e.g. 1-2 is from Cartesian product after FC '-' is instead of 'x'
g.setFont(tempFont);
//bar for feature
y = perFeature*(counter) + minY;
y2 = perFeature*(counter+1) + minY;
g.setStroke(DASHED);
g.setColor(Color.GRAY);
g.drawLine(getX(minX),getY(y2),getX(maxX),getY(y2));
g.setStroke(NORMAL);
g.setColor(Color.GRAY);
barH = (int)(perFeature * ratio);
barTop = y + (perFeature- barH) / 2;
x1 = Math.min((contributions[i]/maxContrib) * maxX,0);
x2 = Math.abs((contributions[i]/maxContrib) * maxX);
g.fillRect(getX(x1),getY(barTop),(int)Math.ceil(x2),(int)barH);
g.setColor(Color.BLACK);
g.drawRect(getX(x1),getY(barTop),(int)Math.ceil(x2),(int)barH);
costumOffsetX = -45; //offset for box with values/contributions
g.setStroke(ROUNDED);
g.setColor(Color.WHITE);
g.fillRect(getX(maxX - 40-costumOffsetX),getY(yText+9), 48, 12);
g.setColor(Color.BLACK);
g.drawRect(getX(maxX - 40-costumOffsetX),getY(yText+9), 48, 12);
g.drawString(padLeft(roundDecimal3(contributions[i]).replace(',','.'), " ", 8),getX(maxX - 48-costumOffsetX),getY(yText+18.5)); //contribution for each attribute
g.setStroke(NORMAL);
}
if(Math.abs(contributions[i]) >= threshold)
counter++;
}
g.flush();
g.close();
finalImage.close();
}
catch (IOException e){
System.out.println("ERROR: "+e.toString());
}
}
public static String roundDecimal2(double d) {
DecimalFormat twoDForm = new DecimalFormat("#.##");
if(twoDForm.format(d).equals("-0"))
return "0";
else
return twoDForm.format(d).replace(",", ".");
}
    public static String roundDecimal3(double d) {
        DecimalFormat threeDForm = new DecimalFormat("#.###"); //three decimal places, unlike roundDecimal2
        if(threeDForm.format(d).equals("-0"))
            return "0";
        else
            return threeDForm.format(d).replace(",", ".");
    }
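    //coordinate helpers: y values are used as pixel offsets directly, while x values are
    //offsets from a vertical zero axis placed at the horizontal centre of the image (VIS_SIZE / 2)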
private static int getY(double y){
return (int)(y);
}
    private static int getX(double x){
        return (int)((VIS_SIZE / 2) + x);
    }
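    /**
     * Trims the decimal part of every number in s to at most decPlaces digits, strips
     * backslashes and quotes, and centre-pads (or cuts) the result to exactly size characters.
     */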
public static String formatValue(String s, int size, int decPlaces){
boolean inDecimal = false;
int[] remove = new int[s.length()];
int counter = 0;
for(int i = 0; i < s.length(); i++){
if(inDecimal)
counter++;
if(Character.isDigit(s.charAt(i))){
if (counter > decPlaces) remove[i] = 1;
}
else{
inDecimal = false;
counter = 0;
}
if(s.charAt(i) == '.' && !inDecimal){
inDecimal = true;
counter = 0;
}
}
String sNew = "";
for(int i = 0; i < s.length(); i++)
if(remove[i] != 1)
sNew += s.charAt(i);
s = sNew.replace("\\", "").replace("'","");
while(s.length() < size){
s = " " + s + " ";
}
if(s.length() > size)
return s.substring(0,size);
return s;
}
public static String padLeft(String s, String c, int size){
while(s.length() < size)
s = c + s;
return s;
}
} | 34,640 | 48.62894 | 266 | java |
featConstr | featConstr-master/src/featconstr/XGBoost.java | package featconstr;
import biz.k11i.xgboost.Predictor; //xgboost-predictor-0.3.17
import biz.k11i.xgboost.util.FVec; //xgboost-predictor-0.3.17
import ml.dmlc.xgboost4j.java.Booster;
import ml.dmlc.xgboost4j.java.DMatrix;
import ml.dmlc.xgboost4j.java.XGBoostError;
import weka.classifiers.AbstractClassifier;
import weka.core.*;
import weka.core.Capabilities.Capability;
import java.io.ByteArrayInputStream;
import java.io.Serializable;
import java.util.*;
/**
* <!-- globalinfo-start -->
 * * Class for an XGBoost classifier.
* * <br><br>
* <!-- globalinfo-end -->
* <p>
* <!-- technical-bibtex-start -->
* * BibTeX:
* * <pre>
* * @misc{missing_id
* * }
* * </pre>
* * <br><br>
* <!-- technical-bibtex-end -->
* <p>
* <!-- options-start -->
* * Valid options are: <p>
* *
* * <pre> -num_round <integer>
* * Number of boosting iterations</pre>
* *
* * <pre> -force-probability-distribution
* * Force probability distribution</pre>
* *
* * <pre> -use-predictor
* * Use predictor for inline (single instance) predictions.</pre>
* *
* * <pre> -booster <string>
* * [default=gbtree]
* * which booster to use, can be gbtree, gblinear or dart. gbtree and dart use tree based model while gblinear uses linear function</pre>
* *
* * <pre> -silent <integer>
* * [default=0]
* * 0 means printing running messages, 1 means silent mode.</pre>
* *
* * <pre> -nthread <integer>
* * [default to maximum number of threads available if not set]
* * number of parallel threads used to run xgboost</pre>
* *
* * <pre> -num_pbuffer <integer>
* * [set automatically by xgboost, no need to be set by user]
* * size of prediction buffer, normally set to number of training instances. The buffers are used to save the prediction results of last boosting step</pre>
* *
* * <pre> -num_feature <integer>
* * [set automatically by xgboost, no need to be set by user]
* * feature dimension used in boosting, set to maximum dimension of the feature</pre>
* *
* * <pre> -eta <double>
* * [default=0.3, alias: learning_rate], range: [0,1]
 * * step size shrinkage used in update to prevent overfitting. After each boosting step, we can directly get the weights of new features, and eta actually shrinks the feature weights to make the boosting process more conservative.</pre>
* *
* * <pre> -learning_rate <double>
* * [default=0.3, alias: eta]. </pre>
* *
* * <pre> -gamma <double>
* * [default=0, alias: min_split_loss], range: [0,∞]
* * minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.</pre>
* *
* * <pre> -min_split_loss <double>
* * alias: gamma</pre>
* *
* * <pre> -max_depth <integer>
* * [default=6], range: [1,∞]
* * maximum depth of a tree, increase this value will make the model more complex / likely to be overfitting.</pre>
* *
* * <pre> -min_child_weight <double>
* * [default=1], range: [0,∞]
* * minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be.</pre>
* *
* * <pre> -max_delta_step <double>
* * [default=0], range: [0,∞]
* * Maximum delta step we allow each tree's weight estimation to be. If the value is set to 0, it means there is no constraint. If it is set to a positive value, it can help making the update step more conservative. Usually this parameter is not needed, but it might help in logistic regression when class is extremely imbalanced. Set it to value of 1-10 might help control the update</pre>
* *
* * <pre> -subsample <double>
* * [default=1], range: (0,1],
* * subsample ratio of the training instance. Setting it to 0.5 means that XGBoost randomly collected half of the data instances to grow trees and this will prevent overfitting.</pre>
* *
* * <pre> -colsample_bytree <double>
* * [default=1], range: (0,1]
* * subsample ratio of columns when constructing each tree.</pre>
* *
* * <pre> -colsample_bylevel <double>
* * [default=1], range: (0,1]
* * subsample ratio of columns for each split, in each level.</pre>
* *
* * <pre> -lambda <double>
* * [default=1, alias: reg_lambda]
* * L2 regularization term on weights, increase this value will make model more conservative.</pre>
* *
* * <pre> -reg_lambda <double>
* * </pre>
* *
* * <pre> -alpha <double>
* * [default=0, alias: reg_alpha]
* * L1 regularization term on weights, increase this value will make model more conservative.</pre>
* *
* * <pre> -reg_alpha <double>
* * </pre>
* *
* * <pre> -tree_method <string>
* * [default='auto'], The tree construction algorithm used in XGBoost; Choices: {'auto', 'exact', 'approx'} </pre>
* *
* * <pre> -sketch_eps <double>
* * [default=0.03], range: (0, 1)
* * Only used for approximate greedy algorithm. This roughly translated into O(1 / sketch_eps) number of bins. Compared to directly select number of bins, this comes with theoretical guarantee with sketch accuracy. Usually user does not have to tune this. but consider setting to a lower number for more accurate enumeration.</pre>
* *
* * <pre> -scale_pos_weight <double>
* * [default=1]
* * Control the balance of positive and negative weights, useful for unbalanced classes. A typical value to consider: sum(negative cases) / sum(positive cases)</pre>
* *
* * <pre> -updater <string>
* * [default='grow_colmaker,prune']
* * A comma separated string defining the sequence of tree updaters to run, providing a modular way to construct and to modify the trees. This is an advanced parameter that is usually set automatically, depending on some other parameters.</pre>
* *
* * <pre> -refresh_leaf <integer>
* * [default=1]
 * * This is a parameter of the 'refresh' updater plugin. When this flag is true, tree leaves as well as tree nodes' stats are updated. When it is false, only node stats are updated.</pre>
* *
* * <pre> -process_type <string>
* * </pre>
* *
* * <pre> -sample_type <string>
* * [default="uniform"]
* * type of sampling algorithm:
* * -"uniform": dropped trees are selected uniformly.
* * -"weighted": dropped trees are selected in proportion to weight.</pre>
* *
* * <pre> -normalize_type <string>
* * [default="tree"]
* * type of normalization algorithm:
* * -"tree": new trees have the same weight of each of dropped trees.
* * -"forest": new trees have the same weight of sum of dropped trees (forest).</pre>
* *
* * <pre> -rate_drop <double>
* * [default=0.0], range: [0.0, 1.0]
* * dropout rate (a fraction of previous trees to drop during the dropout)</pre>
* *
* * <pre> -one_drop <integer>
* * [default=0]
* * when this flag is enabled, at least one tree is always dropped during the dropout (allows Binomial-plus-one or epsilon-dropout from the original DART paper).</pre>
* *
* * <pre> -skip_drop <double>
* * [default=0.0], range: [0.0, 1.0]
* * Probability of skipping the dropout procedure during a boosting iteration.</pre>
* *
* * <pre> -lambda_bias <double>
* * [default=0, alias: reg_lambda_bias]
* * L2 regularization term on bias (no L1 reg on bias because it is not important)</pre>
* *
* * <pre> -reg_lambda_bias <double>
* * </pre>
* *
* * <pre> -objective <string>
* * [ default=reg:linear ]
* * "reg:linear" --linear regression
* * "reg:logistic" --logistic regression
* * "binary:logistic" --logistic regression for binary classification, output probability
* * "binary:logitraw" --logistic regression for binary classification, output score before logistic transformation
* * "count:poisson" --poisson regression for count data, output mean of poisson distribution [max_delta_step is set to 0.7 by default in poisson regression (used to safeguard optimization)]
* * "multi:softmax" --set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)
* * "multi:softprob" --same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata, nclass matrix. The result contains predicted probability of each data point belonging to each class.
* * "rank:pairwise" --set XGBoost to do ranking task by minimizing the pairwise loss
* * "reg:gamma" --gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be gamma-distributed
* * "reg:tweedie" --Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be Tweedie-distributed.</pre>
* *
* * <pre> -num_class <integer>
* * number of classes</pre>
* *
* * <pre> -base_score <double>
* * [ default=0.5 ]
* * -the initial prediction score of all instances, global bias
* * -for sufficient number of iterations, changing this value will not have too much effect.</pre>
* *
* * <pre> -eval_metric <string>
* * [ default according to objective ]
* * evaluation metrics for validation data, a default metric will be assigned according to objective (rmse for regression, and error for classification, mean average precision for ranking )</pre>
* *
* * <pre> -seed <integer>
* * [ default=0 ]
* * random number seed.</pre>
* *
* * <pre> -tweedie_variance_power <double>
* * [default=1.5], range: (1,2)
* * parameter that controls the variance of the Tweedie distribution
* * -set closer to 2 to shift towards a gamma distribution
* * -set closer to 1 to shift towards a Poisson distribution.</pre>
* *
* * <pre> -output-debug-info
* * If set, classifier is run in debug mode and
* * may output additional info to the console</pre>
* *
* * <pre> -do-not-check-capabilities
* * If set, classifier capabilities are not checked before classifier is built
* * (use with caution).</pre>
* *
* * <pre> -num-decimal-places
* * The number of decimal places for the output of numbers in the model (default 2).</pre>
* *
* * <pre> -batch-size
* * The desired batch size for batch prediction (default 100).</pre>
* *
* <!-- options-end -->
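 * <p>
 * A minimal usage sketch (an illustrative addition, not part of the original code; it
 * assumes a Weka {@code Instances} object named {@code data} whose class attribute is set):
 * <pre>
 * XGBoost xgb = new XGBoost();
 * xgb.setOptions(weka.core.Utils.splitOptions("-num_round 50 -objective multi:softprob -eta 0.1"));
 * xgb.buildClassifier(data);
 * double[] dist = xgb.distributionForInstance(data.instance(0));
 * </pre>
 *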
*
* @author Michal Wasiluk ([email protected])
*/
public class XGBoost extends AbstractClassifier implements OptionHandler, TechnicalInformationHandler, Serializable {
private static final long serialVersionUID = 1141447363965993342L;
private Booster booster;
Map<String, Object> params = new HashMap<>();
static List<OptionWithType> xgBoostParamsOptions = new ArrayList<>();
static Set<String> probabilityObjective = new HashSet<>(Arrays.asList("binary:logistic", "multi:softprob"));
private boolean forceProbabilityDistribution = false;
static {
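        //register every supported XGBoost parameter as a typed Weka option; the descriptions
        //below mirror the upstream XGBoost documentation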
// General Parameters
addStringOption("booster", "[default=gbtree]\nwhich booster to use, can be gbtree, gblinear or dart. gbtree and dart use tree based model while gblinear uses linear function");
addIntOption("silent", "[default=0]\n0 means printing running messages, 1 means silent mode.");
addIntOption("nthread", "[default to maximum number of threads available if not set]\nnumber of parallel threads used to run xgboost");
addIntOption("num_pbuffer", "[set automatically by xgboost, no need to be set by user]\nsize of prediction buffer, normally set to number of training instances. The buffers are used to save the prediction results of last boosting step");
addIntOption("num_feature", "[set automatically by xgboost, no need to be set by user]\nfeature dimension used in boosting, set to maximum dimension of the feature");
//Parameters for Tree Booster
addDoubleOption("eta", "[default=0.3, alias: learning_rate], range: [0,1]\nstep size shrinkage used in update to prevents overfitting. After each boosting step, we can directly get the weights of new features. and eta actually shrinks the feature weights to make the boosting process more conservative.");
addDoubleOption("learning_rate", "[default=0.3, alias: eta]. ");
addDoubleOption("gamma", "[default=0, alias: min_split_loss], range: [0,∞]\nminimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.");
addDoubleOption("min_split_loss", "alias: gamma");
addIntOption("max_depth", "[default=6], range: [1,∞]\nmaximum depth of a tree, increase this value will make the model more complex / likely to be overfitting.");
addDoubleOption("min_child_weight", "[default=1], range: [0,∞]\nminimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be.");
addDoubleOption("max_delta_step", "[default=0], range: [0,∞]\nMaximum delta step we allow each tree's weight estimation to be. If the value is set to 0, it means there is no constraint. If it is set to a positive value, it can help making the update step more conservative. Usually this parameter is not needed, but it might help in logistic regression when class is extremely imbalanced. Set it to value of 1-10 might help control the update");
addDoubleOption("subsample", " [default=1], range: (0,1], \n" +
"subsample ratio of the training instance. Setting it to 0.5 means that XGBoost randomly collected half of the data instances to grow trees and this will prevent overfitting.");
addDoubleOption("colsample_bytree", "[default=1], range: (0,1]\nsubsample ratio of columns when constructing each tree.");
addDoubleOption("colsample_bylevel", "[default=1], range: (0,1]\nsubsample ratio of columns for each split, in each level.");
addDoubleOption("lambda", "[default=1, alias: reg_lambda]\nL2 regularization term on weights, increase this value will make model more conservative.");
addDoubleOption("reg_lambda");
addDoubleOption("alpha", "[default=0, alias: reg_alpha]\nL1 regularization term on weights, increase this value will make model more conservative.");
addDoubleOption("reg_alpha");
addStringOption("tree_method", "[default='auto'], The tree construction algorithm used in XGBoost; Choices: {'auto', 'exact', 'approx'} ");
addDoubleOption("sketch_eps", "[default=0.03], range: (0, 1)\nOnly used for approximate greedy algorithm. This roughly translated into O(1 / sketch_eps) number of bins. Compared to directly select number of bins, this comes with theoretical guarantee with sketch accuracy. Usually user does not have to tune this. but consider setting to a lower number for more accurate enumeration.");
addDoubleOption("scale_pos_weight", "[default=1]\nControl the balance of positive and negative weights, useful for unbalanced classes. A typical value to consider: sum(negative cases) / sum(positive cases)");
addStringOption("updater", "[default='grow_colmaker,prune']\nA comma separated string defining the sequence of tree updaters to run, providing a modular way to construct and to modify the trees. This is an advanced parameter that is usually set automatically, depending on some other parameters.");
addIntOption("refresh_leaf", "[default=1]\nThis is a parameter of the 'refresh' updater plugin. When this flag is true, tree leafs as well as tree nodes' stats are updated. When it is false, only node stats are updated.");
addStringOption("process_type");
//Additional parameters for Dart Booster
addStringOption("sample_type", "[default=\"uniform\"]\ntype of sampling algorithm:\n-\"uniform\": dropped trees are selected uniformly.\n-\"weighted\": dropped trees are selected in proportion to weight.");
addStringOption("normalize_type", "[default=\"tree\"]\ntype of normalization algorithm:\n-\"tree\": new trees have the same weight of each of dropped trees.\n-\"forest\": new trees have the same weight of sum of dropped trees (forest).");
addDoubleOption("rate_drop", "[default=0.0], range: [0.0, 1.0]\ndropout rate (a fraction of previous trees to drop during the dropout)");
addIntOption("one_drop", "[default=0]\nwhen this flag is enabled, at least one tree is always dropped during the dropout (allows Binomial-plus-one or epsilon-dropout from the original DART paper).");
addDoubleOption("skip_drop", "[default=0.0], range: [0.0, 1.0]\nProbability of skipping the dropout procedure during a boosting iteration.");
//Additional parameters for Linear Booster
//addDoubleOption("lambda");
//addDoubleOption("alpha");
addDoubleOption("lambda_bias", "[default=0, alias: reg_lambda_bias]\nL2 regularization term on bias (no L1 reg on bias because it is not important)");
addDoubleOption("reg_lambda_bias");
//Learning Task Parameters
addStringOption("objective", "[ default=reg:linear ]\n" +
"\"reg:linear\" --linear regression\n" +
"\"reg:logistic\" --logistic regression\n" +
"\"binary:logistic\" --logistic regression for binary classification, output probability\n" +
"\"binary:logitraw\" --logistic regression for binary classification, output score before logistic transformation\n" +
"\"count:poisson\" --poisson regression for count data, output mean of poisson distribution [max_delta_step is set to 0.7 by default in poisson regression (used to safeguard optimization)]\n" +
"\"multi:softmax\" --set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)\n" +
"\"multi:softprob\" --same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata, nclass matrix. The result contains predicted probability of each data point belonging to each class.\n" +
"\"rank:pairwise\" --set XGBoost to do ranking task by minimizing the pairwise loss\n" +
"\"reg:gamma\" --gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be gamma-distributed\n" +
"\"reg:tweedie\" --Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be Tweedie-distributed.");
addIntOption("num_class", "number of classes");
addDoubleOption("base_score", "[ default=0.5 ]\n-the initial prediction score of all instances, global bias\n-for sufficient number of iterations, changing this value will not have too much effect.");
addStringOption("eval_metric", "[ default according to objective ]\nevaluation metrics for validation data, a default metric will be assigned according to objective (rmse for regression, and error for classification, mean average precision for ranking )");
addIntOption("seed", "[ default=0 ]\nrandom number seed.");
//Parameters for Tweedie Regression
addDoubleOption("tweedie_variance_power", "[default=1.5], range: (1,2)\nparameter that controls the variance of the Tweedie distribution\n-set closer to 2 to shift towards a gamma distribution\n-set closer to 1 to shift towards a Poisson distribution.");
}
    private Integer numRound = 10; //number of boosting iterations
private boolean usePredictor = false;
private Predictor predictor;
public String globalInfo() {
return "Class for a XGBoost classifier.";
}
public void buildClassifier(Instances instances) throws Exception {
//can classifier handle the data?
getCapabilities().testWithFail(instances);
//remove instances with missing class
instances = new Instances(instances);
instances.deleteWithMissingClass();
DMatrix dmat = DMatrixLoader.instancesToDMatrix(instances);
Map<String, DMatrix> watches = new HashMap<>();
watches.put("train", dmat);
if (!params.containsKey("num_class")) {
params.put("num_class", instances.numClasses());
}
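        //train the booster for numRound iterations; the "train" watch reports evaluation on the training data itself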
booster = ml.dmlc.xgboost4j.java.XGBoost.train(dmat, params, numRound, watches, null, null);
if (usePredictor) {
//Load model and create Predictor
this.predictor = new Predictor(new ByteArrayInputStream(booster.toByteArray())); //kryo-2.21.jar
}
}
@Override
public double[] distributionForInstance(Instance instance) throws Exception {
if (!forceProbabilityDistribution && !probabilityObjective.contains(params.get("objective"))) {
return super.distributionForInstance(instance);
}
return predictInstance(instance);
}
private double[] predictInstance(Instance instance) throws XGBoostError {
if (usePredictor) {
return predictWithPredictor(instance);
}
return predictWithDMatrix(instance);
}
private double[] predictWithDMatrix(Instance instance) throws XGBoostError {
DMatrix dmat = DMatrixLoader.instanceToDMatrix(instance);
float[][] predict = booster.predict(dmat);
double[] predictDouble = new double[predict[0].length];
for (int i = 0; i < predict[0].length; i++) {
predictDouble[i] = predict[0][i];
}
return predictDouble;
}
private double[] predictWithPredictor(Instance instance) {
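        //build a sparse attribute-index -> value map for the instance, skipping the class attribute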
Map<Integer, Double> dataMap = new HashMap<>();
Attribute classAttribute = instance.classAttribute();
int classAttrIndex = classAttribute.index();
Enumeration<Attribute> attributeEnumeration = instance.enumerateAttributes();
while (attributeEnumeration.hasMoreElements()) {
Attribute attribute = attributeEnumeration.nextElement();
int attrIndex = attribute.index();
if (attrIndex == classAttrIndex) {
continue;
}
dataMap.put(attrIndex, instance.value(attribute));
}
FVec fVecSparse = FVec.Transformer.fromMap(dataMap);
        //convert the float predictions returned by the Predictor to double
        float[] tmp = predictor.predict(fVecSparse);
        double[] doubleTmp = new double[tmp.length];
        for(int i = 0; i < tmp.length; i++)
            doubleTmp[i] = tmp[i];
        return doubleTmp;
}
@Override
public double classifyInstance(Instance instance) throws Exception {
if (forceProbabilityDistribution && probabilityObjective.contains(params.get("objective"))) {
return super.classifyInstance(instance);
}
double[] predict = predictInstance(instance);
return predict[0];
}
/**
* Returns an enumeration describing the available options.
*
* @return an enumeration of all the available options.
*/
@Override
public Enumeration<Option> listOptions() {
Vector<Option> newVector = new Vector<Option>(3);
newVector.addElement(new Option("Number of boosting iterations", "num_round", 1, "-num_round <integer>"));
newVector.addElement(new Option("Force probability distribution", "force-probability-distribution", 0, "-force-probability-distribution"));
newVector.addElement(new Option("Use predictor for inline (single instance) predictions.", "use-predictor", 0, "-use-predictor"));
xgBoostParamsOptions.forEach(newVector::addElement);
newVector.addAll(Collections.list(super.listOptions()));
return newVector.elements();
}
/**
* Parses a given list of options.
* <!-- options-start -->
* * Valid options are: <p>
* *
* * <pre> -num_round <integer>
* * Number of boosting iterations</pre>
* *
* * <pre> -force-probability-distribution
* * Force probability distribution</pre>
* *
* * <pre> -use-predictor
* * Use predictor for inline (single instance) predictions.</pre>
* *
* * <pre> -booster <string>
* * [default=gbtree]
* * which booster to use, can be gbtree, gblinear or dart. gbtree and dart use tree based model while gblinear uses linear function</pre>
* *
* * <pre> -silent <integer>
* * [default=0]
* * 0 means printing running messages, 1 means silent mode.</pre>
* *
* * <pre> -nthread <integer>
* * [default to maximum number of threads available if not set]
* * number of parallel threads used to run xgboost</pre>
* *
* * <pre> -num_pbuffer <integer>
* * [set automatically by xgboost, no need to be set by user]
* * size of prediction buffer, normally set to number of training instances. The buffers are used to save the prediction results of last boosting step</pre>
* *
* * <pre> -num_feature <integer>
* * [set automatically by xgboost, no need to be set by user]
* * feature dimension used in boosting, set to maximum dimension of the feature</pre>
* *
* * <pre> -eta <double>
* * [default=0.3, alias: learning_rate], range: [0,1]
 * * step size shrinkage used in update to prevent overfitting. After each boosting step, we can directly get the weights of new features, and eta actually shrinks the feature weights to make the boosting process more conservative.</pre>
* *
* * <pre> -learning_rate <double>
* * [default=0.3, alias: eta]. </pre>
* *
* * <pre> -gamma <double>
* * [default=0, alias: min_split_loss], range: [0,∞]
* * minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.</pre>
* *
* * <pre> -min_split_loss <double>
* * alias: gamma</pre>
* *
* * <pre> -max_depth <integer>
* * [default=6], range: [1,∞]
* * maximum depth of a tree, increase this value will make the model more complex / likely to be overfitting.</pre>
* *
* * <pre> -min_child_weight <double>
* * [default=1], range: [0,∞]
* * minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be.</pre>
* *
* * <pre> -max_delta_step <double>
* * [default=0], range: [0,∞]
* * Maximum delta step we allow each tree's weight estimation to be. If the value is set to 0, it means there is no constraint. If it is set to a positive value, it can help making the update step more conservative. Usually this parameter is not needed, but it might help in logistic regression when class is extremely imbalanced. Set it to value of 1-10 might help control the update</pre>
* *
* * <pre> -subsample <double>
* * [default=1], range: (0,1],
* * subsample ratio of the training instance. Setting it to 0.5 means that XGBoost randomly collected half of the data instances to grow trees and this will prevent overfitting.</pre>
* *
* * <pre> -colsample_bytree <double>
* * [default=1], range: (0,1]
* * subsample ratio of columns when constructing each tree.</pre>
* *
* * <pre> -colsample_bylevel <double>
* * [default=1], range: (0,1]
* * subsample ratio of columns for each split, in each level.</pre>
* *
* * <pre> -lambda <double>
* * [default=1, alias: reg_lambda]
* * L2 regularization term on weights, increase this value will make model more conservative.</pre>
* *
* * <pre> -reg_lambda <double>
* * </pre>
* *
* * <pre> -alpha <double>
* * [default=0, alias: reg_alpha]
* * L1 regularization term on weights, increase this value will make model more conservative.</pre>
* *
* * <pre> -reg_alpha <double>
* * </pre>
* *
* * <pre> -tree_method <string>
* * [default='auto'], The tree construction algorithm used in XGBoost; Choices: {'auto', 'exact', 'approx'} </pre>
* *
* * <pre> -sketch_eps <double>
* * [default=0.03], range: (0, 1)
* * Only used for approximate greedy algorithm. This roughly translated into O(1 / sketch_eps) number of bins. Compared to directly select number of bins, this comes with theoretical guarantee with sketch accuracy. Usually user does not have to tune this. but consider setting to a lower number for more accurate enumeration.</pre>
* *
* * <pre> -scale_pos_weight <double>
* * [default=1]
* * Control the balance of positive and negative weights, useful for unbalanced classes. A typical value to consider: sum(negative cases) / sum(positive cases)</pre>
* *
* * <pre> -updater <string>
* * [default='grow_colmaker,prune']
* * A comma separated string defining the sequence of tree updaters to run, providing a modular way to construct and to modify the trees. This is an advanced parameter that is usually set automatically, depending on some other parameters.</pre>
* *
* * <pre> -refresh_leaf <integer>
* * [default=1]
 * * This is a parameter of the 'refresh' updater plugin. When this flag is true, tree leaves as well as tree nodes' stats are updated. When it is false, only node stats are updated.</pre>
* *
* * <pre> -process_type <string>
* * </pre>
* *
* * <pre> -sample_type <string>
* * [default="uniform"]
* * type of sampling algorithm:
* * -"uniform": dropped trees are selected uniformly.
* * -"weighted": dropped trees are selected in proportion to weight.</pre>
* *
* * <pre> -normalize_type <string>
* * [default="tree"]
* * type of normalization algorithm:
* * -"tree": new trees have the same weight of each of dropped trees.
* * -"forest": new trees have the same weight of sum of dropped trees (forest).</pre>
* *
* * <pre> -rate_drop <double>
* * [default=0.0], range: [0.0, 1.0]
* * dropout rate (a fraction of previous trees to drop during the dropout)</pre>
* *
* * <pre> -one_drop <integer>
* * [default=0]
* * when this flag is enabled, at least one tree is always dropped during the dropout (allows Binomial-plus-one or epsilon-dropout from the original DART paper).</pre>
* *
* * <pre> -skip_drop <double>
* * [default=0.0], range: [0.0, 1.0]
* * Probability of skipping the dropout procedure during a boosting iteration.</pre>
* *
* * <pre> -lambda_bias <double>
* * [default=0, alias: reg_lambda_bias]
* * L2 regularization term on bias (no L1 reg on bias because it is not important)</pre>
* *
* * <pre> -reg_lambda_bias <double>
* * </pre>
* *
* * <pre> -objective <string>
* * [ default=reg:linear ]
* * "reg:linear" --linear regression
* * "reg:logistic" --logistic regression
* * "binary:logistic" --logistic regression for binary classification, output probability
* * "binary:logitraw" --logistic regression for binary classification, output score before logistic transformation
* * "count:poisson" --poisson regression for count data, output mean of poisson distribution [max_delta_step is set to 0.7 by default in poisson regression (used to safeguard optimization)]
* * "multi:softmax" --set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)
* * "multi:softprob" --same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata, nclass matrix. The result contains predicted probability of each data point belonging to each class.
* * "rank:pairwise" --set XGBoost to do ranking task by minimizing the pairwise loss
* * "reg:gamma" --gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be gamma-distributed
* * "reg:tweedie" --Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be Tweedie-distributed.</pre>
* *
* * <pre> -num_class <integer>
* * number of classes</pre>
* *
* * <pre> -base_score <double>
* * [ default=0.5 ]
* * -the initial prediction score of all instances, global bias
* * -for sufficient number of iterations, changing this value will not have too much effect.</pre>
* *
* * <pre> -eval_metric <string>
* * [ default according to objective ]
* * evaluation metrics for validation data, a default metric will be assigned according to objective (rmse for regression, and error for classification, mean average precision for ranking )</pre>
* *
* * <pre> -seed <integer>
* * [ default=0 ]
* * random number seed.</pre>
* *
* * <pre> -tweedie_variance_power <double>
* * [default=1.5], range: (1,2)
* * parameter that controls the variance of the Tweedie distribution
* * -set closer to 2 to shift towards a gamma distribution
* * -set closer to 1 to shift towards a Poisson distribution.</pre>
* *
* * <pre> -output-debug-info
* * If set, classifier is run in debug mode and
* * may output additional info to the console</pre>
* *
* * <pre> -do-not-check-capabilities
* * If set, classifier capabilities are not checked before classifier is built
* * (use with caution).</pre>
* *
* * <pre> -num-decimal-places
* * The number of decimal places for the output of numbers in the model (default 2).</pre>
* *
* * <pre> -batch-size
* * The desired batch size for batch prediction (default 100).</pre>
* *
* <!-- options-end -->
*
* @param options the list of options as an array of strings
* @throws Exception if an option is not supported
*/
@Override
public void setOptions(String[] options) throws Exception {
super.setOptions(options);
Integer num_round = getIntOptionValue("num_round", options);
if (num_round != null) {
this.numRound = num_round;
}
forceProbabilityDistribution = Utils.getFlag("force-probability-distribution", options);
usePredictor = Utils.getFlag("use-predictor", options);
xgBoostParamsOptions.forEach(o -> checkOption(o, options));
Utils.checkForRemainingOptions(options);
}
/**
* Gets the current settings of the classifier.
*
* @return an array of strings suitable for passing to setOptions
*/
@Override
public String[] getOptions() {
Vector<String> options = new Vector<String>();
if (forceProbabilityDistribution) {
options.add("-force-probability-distribution");
}
options.add("-num_round");
options.add(String.valueOf(this.numRound));
options.add("-use_predictor");
options.add(String.valueOf(this.usePredictor));
params.forEach((name, val) -> {
options.add("-" + name);
options.add(String.valueOf(val));
});
Collections.addAll(options, super.getOptions());
return options.toArray(new String[0]);
}
public Capabilities getCapabilities() {
Capabilities result = super.getCapabilities(); // returns the object from weka.classifiers.Classifier
//attributes
result.enable(Capability.NOMINAL_ATTRIBUTES);
result.enable(Capability.NUMERIC_ATTRIBUTES);
result.enable(Capability.DATE_ATTRIBUTES);
result.enable(Capability.MISSING_VALUES);
//class
result.enable(Capability.NOMINAL_CLASS);
result.enable(Capability.MISSING_CLASS_VALUES);
return result;
}
void checkOption(OptionWithType o, String[] options) {
if (OptionWithType.ArgType.STRING.equals(o.type())) {
checkStringOption(o.name(), options);
} else if (OptionWithType.ArgType.INTEGER.equals(o.type())) {
checkIntOption(o.name(), options);
} else if (OptionWithType.ArgType.DOUBLE.equals(o.type())) {
checkDoubleOption(o.name(), options);
}
}
void checkStringOption(String name, String[] options) {
String paramStr = getOptionValue(name, options);
if (paramStr != null) {
params.put(name, paramStr);
}
}
void checkDoubleOption(String name, String[] options) {
String paramStr = getOptionValue(name, options);
if (paramStr != null) {
params.put(name, Double.parseDouble(paramStr));
}
}
void checkIntOption(String name, String[] options) {
Integer param = getIntOptionValue(name, options);
if (param != null) {
params.put(name, param);
}
}
Integer getIntOptionValue(String name, String[] options) {
String paramStr = getOptionValue(name, options);
if (paramStr == null) {
return null;
}
return Integer.parseInt(paramStr);
}
String getOptionValue(String name, String[] options) {
try {
String option = Utils.getOption(name, options).trim();
return option.isEmpty() ? null : option;
} catch (Exception e) {
return null;
}
}
static void addStringOption(String name) {
addStringOption(name, "");
}
static void addStringOption(String name, String description) {
xgBoostParamsOptions.add(createOption(name, description, OptionWithType.ArgType.STRING));
}
static void addIntOption(String name) {
addIntOption(name, "");
}
static void addIntOption(String name, String description) {
xgBoostParamsOptions.add(createOption(name, description, OptionWithType.ArgType.INTEGER));
}
static void addDoubleOption(String name) {
addDoubleOption(name, "");
}
static void addDoubleOption(String name, String description) {
xgBoostParamsOptions.add(createOption(name, description, OptionWithType.ArgType.DOUBLE));
}
static OptionWithType createOption(String name, String description, OptionWithType.ArgType argType) {
String synopsis = "-" + name + " <" + argType.name().toLowerCase() + ">";
return new OptionWithType(description == null ? name : description, name, 1, synopsis, argType);
}
@Override
public TechnicalInformation getTechnicalInformation() {
return new TechnicalInformation(TechnicalInformation.Type.MISC);
}
} | 39,577 | 51.770667 | 471 | java |
Janus | Janus-master/src/minerful/AbstractMinerFulStarter.java | package minerful;
import org.apache.commons.cli.Options;
public abstract class AbstractMinerFulStarter {
public abstract Options setupOptions();
} | 152 | 20.857143 | 47 | java |
Janus | Janus-master/src/minerful/JanusDFGVariantAnalysisLauncher.java | package minerful;
import minerful.concept.TaskCharArchive;
import minerful.logparser.LogEventClassifier;
import minerful.logparser.LogParser;
import minerful.logparser.XesLogParser;
import minerful.params.SystemCmdParameters;
import minerful.reactive.params.JanusDFGVariantCmdParameters;
import minerful.reactive.params.JanusPrintParameters;
import minerful.reactive.variant.DFGPermutationResult;
import minerful.reactive.variant.DFGtimesVariantAnalysisCore;
import minerful.utils.MessagePrinter;
import java.io.File;
import java.util.List;
/**
* Class for launching Janus variant analysis on two logs
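 * <p>
 * A minimal usage sketch (illustrative only; {@code janusParams} is assumed to be an
 * already populated {@link JanusDFGVariantCmdParameters} instance):
 * <pre>
 * JanusDFGVariantAnalysisLauncher launcher = new JanusDFGVariantAnalysisLauncher(janusParams);
 * List&lt;DFGPermutationResult&gt; results = launcher.checkVariants();
 * </pre>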
*/
public class JanusDFGVariantAnalysisLauncher {
public static MessagePrinter logger = MessagePrinter.getInstance(JanusDFGVariantAnalysisLauncher.class);
private JanusDFGVariantCmdParameters janusParams;
private SystemCmdParameters systemParams;
private JanusPrintParameters janusViewParams;
private XesLogParser eventLog1;
private XesLogParser eventLog2;
public JanusDFGVariantAnalysisLauncher(JanusDFGVariantCmdParameters janusParams) {
this.janusParams = janusParams;
this.systemParams = new SystemCmdParameters();
this.janusViewParams = new JanusPrintParameters();
logger.info("Loading event logs...");
this.eventLog1 = (XesLogParser) deriveLogParserFromLogFile(janusParams.inputLogLanguage1, janusParams.inputLogFile1, janusParams.eventClassification, null);
this.eventLog2 = (XesLogParser) deriveLogParserFromLogFile(janusParams.inputLogLanguage2, janusParams.inputLogFile2, janusParams.eventClassification, eventLog1.getTaskCharArchive());
// this is a bit redundant, but to make sure that both have the same alphabet we recompute the first parser with the alphabet of the second, which now has both alphabets
this.eventLog1 = (XesLogParser) deriveLogParserFromLogFile(janusParams.inputLogLanguage1, janusParams.inputLogFile1, janusParams.eventClassification, eventLog2.getTaskCharArchive());
}
public JanusDFGVariantAnalysisLauncher(JanusDFGVariantCmdParameters janusParams, JanusPrintParameters viewParams, SystemCmdParameters systemParams) {
this(janusParams);
this.systemParams = systemParams;
this.janusViewParams = viewParams;
}
    /**
     * Returns the logParser of a given input log
     *
     * @param inputLanguage file format of the input event log
     * @param inputLogFile path to the input file of the event log
     * @param eventClassification strategy used to classify the events of the log
     * @param taskCharArchive task alphabet to reuse for the event encoding (may be null)
     * @return LogParser of the input log
     */
public static LogParser deriveLogParserFromLogFile(JanusDFGVariantCmdParameters.LogInputEncoding inputLanguage, File inputLogFile, JanusDFGVariantCmdParameters.EventClassification eventClassification, TaskCharArchive taskCharArchive) {
LogParser logParser = null;
switch (inputLanguage) {
case xes:
case mxml:
LogEventClassifier.ClassificationType evtClassi = fromInputParamToXesLogClassificationType(eventClassification);
try {
logParser = new XesLogParser(inputLogFile, evtClassi, taskCharArchive);
} catch (Exception e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
// Let us try to free memory from the unused XesDecoder!
System.gc();
break;
case strings:
try {
// logParser = new StringLogParser(inputLogFile, LogEventClassifier.ClassificationType.NAME, taskCharArchive);
throw new UnsupportedOperationException("Only XES log supported");
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
break;
default:
throw new UnsupportedOperationException("This encoding (" + inputLanguage + ") is not yet supported");
}
return logParser;
}
    /**
     * Returns the classification type for a given event log encoding
     *
     * @param evtClassInputParam event classification strategy given as input parameter
     * @return the corresponding log event classification type
     */
public static LogEventClassifier.ClassificationType fromInputParamToXesLogClassificationType(JanusDFGVariantCmdParameters.EventClassification evtClassInputParam) {
switch (evtClassInputParam) {
case name:
return LogEventClassifier.ClassificationType.NAME;
case logspec:
return LogEventClassifier.ClassificationType.LOG_SPECIFIED;
default:
throw new UnsupportedOperationException("Classification strategy " + evtClassInputParam + " not yet implemented");
}
}
public TaskCharArchive getAlphabetDecoder() {
return eventLog2.getTaskCharArchive();
}
    /**
     * Analyses the time differences between the directly-follows relations in the two event logs
     *
     * @return the list of results produced by the analysis
     */
public List<DFGPermutationResult> checkVariants() {
DFGtimesVariantAnalysisCore variantAnalysisCore = new DFGtimesVariantAnalysisCore(
eventLog1, eventLog2, janusParams, janusViewParams);
return variantAnalysisCore.checkWithGraph();
}
}
| 5,294 | 42.760331 | 239 | java |
Janus | Janus-master/src/minerful/JanusDFGVariantAnalysisStarter.java | package minerful;
import minerful.params.SystemCmdParameters;
import minerful.params.ViewCmdParameters;
import minerful.reactive.io.JanusDFGVariantOutputManagementLauncher;
import minerful.reactive.params.JanusDFGVariantCmdParameters;
import minerful.reactive.params.JanusPrintParameters;
import minerful.reactive.variant.DFGPermutationResult;
import minerful.reactive.variant.DFGtimesVariantAnalysisCore;
import minerful.utils.MessagePrinter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import java.util.List;
/**
 * Class to start Janus variant analysis from the terminal
*/
public class JanusDFGVariantAnalysisStarter extends MinerFulMinerStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(JanusDFGVariantAnalysisStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options systemOptions = SystemCmdParameters.parseableOptions(),
// outputOptions = OutputModelParameters.parseableOptions(),
// postPrOptions = PostProcessingCmdParameters.parseableOptions(),
viewOptions = ViewCmdParameters.parseableOptions(),
janusViewOptions = JanusPrintParameters.parseableOptions(),
// chkOptions = CheckingCmdParameters.parseableOptions(),
// inputLogOptions = InputLogCmdParameters.parseableOptions(),
// inpuModlOptions = InputModelParameters.parseableOptions(),
janusOptions = JanusDFGVariantCmdParameters.parseableOptions();
for (Object opt : systemOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
// for (Object opt : outputOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
// for (Object opt : postPrOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
// for (Object opt : viewOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
for (Object opt : janusViewOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
// for (Object opt : chkOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
// for (Object opt : inputLogOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
// for (Object opt : inpuModlOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
for (Object opt : janusOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
return cmdLineOptions;
}
public static void main(String[] args) {
JanusDFGVariantAnalysisStarter checkStarter = new JanusDFGVariantAnalysisStarter();
Options cmdLineOptions = checkStarter.setupOptions();
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
// OutputModelParameters outParams =
// new OutputModelParameters(
// cmdLineOptions,
// args);
// PostProcessingCmdParameters preProcParams =
// new PostProcessingCmdParameters(
// cmdLineOptions,
// args);
// CheckingCmdParameters chkParams =
// new CheckingCmdParameters(
// cmdLineOptions,
// args);
// InputLogCmdParameters inputLogParams =
// new InputLogCmdParameters(
// cmdLineOptions,
// args);
// InputModelParameters inpuModlParams =
// new InputModelParameters(
// cmdLineOptions,
// args);
// ViewCmdParameters viewParams =
// new ViewCmdParameters(
// cmdLineOptions,
// args);
JanusPrintParameters janusViewParams =
new JanusPrintParameters(
cmdLineOptions,
args);
JanusDFGVariantCmdParameters janusParams =
new JanusDFGVariantCmdParameters(
cmdLineOptions,
args);
MessagePrinter.configureLogging(systemParams.debugLevel);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
double execTimeStart = System.currentTimeMillis();
JanusDFGVariantAnalysisLauncher variantAnalysis = new JanusDFGVariantAnalysisLauncher(janusParams, janusViewParams, systemParams);
List<DFGPermutationResult> results = variantAnalysis.checkVariants();
new JanusDFGVariantOutputManagementLauncher().manageVariantOutput(
results,
janusParams,
janusViewParams,
systemParams,
janusParams.inputLogFile1.getName().substring(0, janusParams.inputLogFile1.getName().lastIndexOf('.')),
janusParams.inputLogFile2.getName().substring(0, janusParams.inputLogFile2.getName().lastIndexOf('.'))
);
double execTimeEnd = System.currentTimeMillis();
logger.info("Total execution time: " + (execTimeEnd - execTimeStart));
}
}
| 5,459 | 40.052632 | 138 | java |
Janus | Janus-master/src/minerful/JanusMeasurementsLauncher.java | package minerful;
import minerful.checking.params.CheckingCmdParameters;
import minerful.concept.ProcessModel;
import minerful.io.ProcessModelLoader;
import minerful.io.params.InputModelParameters;
import minerful.logparser.LogParser;
import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.reactive.measurements.MegaMatrixMonster;
import minerful.reactive.measurements.ReactiveMeasurementsOfflineQueryingCore;
import minerful.reactive.params.JanusMeasurementsCmdParameters;
import minerful.reactive.params.JanusPrintParameters;
import minerful.utils.MessagePrinter;
import org.processmining.plugins.declareminer.visualizing.AssignmentModel;
/**
* Class for launching JanusZ model checker
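 * <p>
 * A minimal usage sketch (illustrative only; the parameter objects are assumed to be
 * already populated, e.g. from the command line):
 * <pre>
 * JanusMeasurementsLauncher launcher = new JanusMeasurementsLauncher(
 *         inputModelParams, inputLogParams, chkParams, systemParams, janusParams);
 * MegaMatrixMonster evaluation = launcher.checkModel();
 * </pre>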
*/
public class JanusMeasurementsLauncher {
public static MessagePrinter logger = MessagePrinter.getInstance(JanusMeasurementsLauncher.class);
private ProcessModel processSpecification;
private LogParser eventLog;
private CheckingCmdParameters chkParams;
private JanusMeasurementsCmdParameters janusParams;
private JanusPrintParameters janusViewParams;
private JanusMeasurementsLauncher(CheckingCmdParameters chkParams, JanusMeasurementsCmdParameters janusParams) {
this.chkParams = chkParams;
this.janusParams = janusParams;
this.janusViewParams = new JanusPrintParameters();
}
public JanusMeasurementsLauncher(AssignmentModel declareMapModel, LogParser inputLog, CheckingCmdParameters chkParams, JanusMeasurementsCmdParameters janusParams) {
this(chkParams, janusParams);
this.processSpecification = new ProcessModelLoader().loadProcessModel(declareMapModel);
this.eventLog = inputLog;
}
public JanusMeasurementsLauncher(ProcessModel minerFulProcessModel, LogParser inputLog, CheckingCmdParameters chkParams, JanusMeasurementsCmdParameters janusParams) {
this(chkParams, janusParams);
this.processSpecification = minerFulProcessModel;
this.eventLog = inputLog;
}
public JanusMeasurementsLauncher(InputModelParameters inputParams, InputLogCmdParameters inputLogParams, CheckingCmdParameters chkParams, SystemCmdParameters systemParams, JanusMeasurementsCmdParameters janusParams) {
this(chkParams, janusParams);
if (inputParams.inputFile == null) {
systemParams.printHelpForWrongUsage("Input process model file missing!");
System.exit(1);
}
this.eventLog = MinerFulMinerLauncher.deriveLogParserFromLogFile(inputLogParams);
// Load the process specification from the file
this.processSpecification =
new ProcessModelLoader().loadProcessModel(inputParams.inputLanguage, inputParams.inputFile, this.eventLog.getTaskCharArchive());
// Apply some preliminary pruning
// PostProcessingCmdParameters preProcParams; //from input
// MinerFulPruningCore pruniCore = new MinerFulPruningCore(this.processSpecification, preProcParams);
// this.processSpecification.bag = pruniCore.massageConstraints();
MessagePrinter.configureLogging(systemParams.debugLevel);
}
public JanusMeasurementsLauncher(InputModelParameters inputParams, InputLogCmdParameters inputLogParams, CheckingCmdParameters chkParams, SystemCmdParameters systemParams, JanusMeasurementsCmdParameters janusParams, JanusPrintParameters janusViewParams) {
this(inputParams, inputLogParams, chkParams, systemParams, janusParams);
this.janusViewParams = janusViewParams;
}
public ProcessModel getProcessSpecification() {
return processSpecification;
}
public LogParser getEventLog() {
return eventLog;
}
/**
* Check the input model against the input log.
*/
public MegaMatrixMonster checkModel() {
// the events evaluation must be computed in any case
processSpecification.bag.initAutomataBag();
ReactiveMeasurementsOfflineQueryingCore reactiveMeasurementsOfflineQueryingCore = new ReactiveMeasurementsOfflineQueryingCore(
0, eventLog, janusParams, janusViewParams, null, eventLog.getTaskCharArchive(), null, processSpecification.bag);
double before = System.currentTimeMillis();
MegaMatrixMonster result = reactiveMeasurementsOfflineQueryingCore.check();
double after = System.currentTimeMillis();
logger.info("Total events evaluation time: " + (after - before));
// Compute the measures at the detail level selected in input
before = System.currentTimeMillis();
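        //detailsLevel selects what is computed on top of the event-level evaluation:
        //per-trace measures, their aggregate statistics, log-level measures, or all of them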
switch (janusParams.detailsLevel) {
case event:
break;
case trace:
if (janusParams.measure.equals("all")) {
result.computeAllTraceMeasures(janusParams.nanTraceSubstituteFlag, janusParams.nanTraceSubstituteValue);
} else {
result.computeSingleTraceMeasures(janusParams.measure, janusParams.nanTraceSubstituteFlag, janusParams.nanTraceSubstituteValue);
}
break;
case allTrace:
case traceStats:
if (janusParams.measure.equals("all")) {
result.computeAllTraceMeasures(janusParams.nanTraceSubstituteFlag, janusParams.nanTraceSubstituteValue);
result.computeAllTraceMeasuresStats(janusParams.nanLogSkipFlag);
} else {
result.computeSingleTraceMeasures(janusParams.measure, janusParams.nanTraceSubstituteFlag, janusParams.nanTraceSubstituteValue);
result.computeSingleTraceMeasuresStats(janusParams.nanLogSkipFlag);
}
break;
case log:
if (janusParams.measure.equals("all")) {
result.computeAllLogMeasures();
} else {
result.computeSingleLogMeasures(janusParams.measure);
}
break;
case allLog:
case all:
if (janusParams.measure.equals("all")) {
result.computeAllTraceMeasures(janusParams.nanTraceSubstituteFlag, janusParams.nanTraceSubstituteValue);
result.computeAllTraceMeasuresStats(janusParams.nanLogSkipFlag);
result.computeAllLogMeasures();
} else {
result.computeSingleTraceMeasures(janusParams.measure, janusParams.nanTraceSubstituteFlag, janusParams.nanTraceSubstituteValue);
result.computeSingleTraceMeasuresStats(janusParams.nanLogSkipFlag);
result.computeSingleLogMeasures(janusParams.measure);
}
break;
}
after = System.currentTimeMillis();
logger.info("Total measurement retrieval time: " + (after - before));
return result;
}
} | 6,889 | 47.181818 | 259 | java |
Janus | Janus-master/src/minerful/JanusMeasurementsStarter.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package minerful;
import minerful.checking.params.CheckingCmdParameters;
import minerful.concept.TaskCharArchive;
import minerful.io.params.InputModelParameters;
import minerful.io.params.OutputModelParameters;
import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.reactive.measurements.MegaMatrixMonster;
import minerful.reactive.io.JanusMeasurementsOutputManagementLauncher;
import minerful.reactive.params.JanusMeasurementsCmdParameters;
import minerful.reactive.params.JanusPrintParameters;
import minerful.utils.MessagePrinter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
public class JanusMeasurementsStarter extends MinerFulMinerStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(JanusMeasurementsStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options systemOptions = SystemCmdParameters.parseableOptions(),
outputOptions = OutputModelParameters.parseableOptions(),
// postPrOptions = PostProcessingCmdParameters.parseableOptions(),
// viewOptions = ViewCmdParameters.parseableOptions(),
janusViewOptions = JanusPrintParameters.parseableOptions(),
chkOptions = CheckingCmdParameters.parseableOptions(),
inputLogOptions = InputLogCmdParameters.parseableOptions(),
inpuModlOptions = InputModelParameters.parseableOptions(),
janusOptions = JanusMeasurementsCmdParameters.parseableOptions();
for (Object opt : systemOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : outputOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
// for (Object opt : postPrOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
// for (Object opt : viewOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
for (Object opt : janusViewOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : chkOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : inputLogOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : inpuModlOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : janusOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
return cmdLineOptions;
}
public static void main(String[] args) {
JanusMeasurementsStarter checkStarter = new JanusMeasurementsStarter();
Options cmdLineOptions = checkStarter.setupOptions();
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
OutputModelParameters outParams =
new OutputModelParameters(
cmdLineOptions,
args);
// PostProcessingCmdParameters preProcParams =
// new PostProcessingCmdParameters(
// cmdLineOptions,
// args);
CheckingCmdParameters chkParams =
new CheckingCmdParameters(
cmdLineOptions,
args);
InputLogCmdParameters inputLogParams =
new InputLogCmdParameters(
cmdLineOptions,
args);
InputModelParameters inpuModlParams =
new InputModelParameters(
cmdLineOptions,
args);
// ViewCmdParameters viewParams =
// new ViewCmdParameters(
// cmdLineOptions,
// args);
JanusPrintParameters janusViewParams =
new JanusPrintParameters(
cmdLineOptions,
args);
JanusMeasurementsCmdParameters janusParams =
new JanusMeasurementsCmdParameters(
cmdLineOptions,
args);
MessagePrinter.configureLogging(systemParams.debugLevel);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
JanusMeasurementsLauncher miFuCheLa = new JanusMeasurementsLauncher(inpuModlParams, inputLogParams, chkParams, systemParams, janusParams, janusViewParams);
MegaMatrixMonster evaluation = miFuCheLa.checkModel();
        TaskCharArchive alphabet = miFuCheLa.getProcessSpecification().getTaskCharArchive(); //note: the model's character mapping contains at least the log parser's one, as it is constructed starting from it
new JanusMeasurementsOutputManagementLauncher().manageMeasurementsOutput(evaluation, janusViewParams, outParams, systemParams, janusParams, alphabet);
}
} | 5,283 | 42.311475 | 221 | java |
Janus | Janus-master/src/minerful/JanusMinerStarter.java | package minerful;
import minerful.concept.ProcessModel;
import minerful.concept.TaskChar;
import minerful.concept.TaskCharArchive;
import minerful.concept.constraint.ConstraintsBag;
import minerful.io.params.OutputModelParameters;
import minerful.logparser.LogParser;
import minerful.miner.core.MinerFulKBCore;
import minerful.miner.core.MinerFulPruningCore;
import minerful.miner.core.MinerFulQueryingCore;
import minerful.miner.params.MinerFulCmdParameters;
import minerful.miner.stats.GlobalStatsTable;
import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.params.ViewCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.reactive.miner.ReactiveMinerOfflineQueryingCore;
import minerful.reactive.miner.ReactiveMinerPruningCore;
import minerful.reactive.miner.ReactiveMinerQueryingCore;
import minerful.utils.MessagePrinter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.*;
/**
 * Starts Janus-classic (online-ready) mining from the command line
 */
public class JanusMinerStarter extends AbstractMinerFulStarter {
protected static final String PROCESS_MODEL_NAME_PATTERN = "Process model discovered from %s";
protected static final String DEFAULT_ANONYMOUS_MODEL_NAME = "Discovered process model";
private static MessagePrinter logger = MessagePrinter.getInstance(JanusMinerStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options minerfulOptions = MinerFulCmdParameters.parseableOptions(),
inputOptions = InputLogCmdParameters.parseableOptions(),
systemOptions = SystemCmdParameters.parseableOptions(),
viewOptions = ViewCmdParameters.parseableOptions(),
outputOptions = OutputModelParameters.parseableOptions(),
				postProcOptions = PostProcessingCmdParameters.parseableOptions();
		for (Object opt: postProcOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: minerfulOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: inputOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: viewOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: outputOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: systemOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
return cmdLineOptions;
}
	/**
	 * @param args
	 *            the command line arguments, parsed against the options
	 *            defined in {@link #setupOptions()}
	 */
public static void main(String[] args) {
JanusMinerStarter minerMinaStarter = new JanusMinerStarter();
Options cmdLineOptions = minerMinaStarter.setupOptions();
InputLogCmdParameters inputParams =
new InputLogCmdParameters(
cmdLineOptions,
args);
MinerFulCmdParameters minerFulParams =
new MinerFulCmdParameters(
cmdLineOptions,
args);
ViewCmdParameters viewParams =
new ViewCmdParameters(
cmdLineOptions,
args);
OutputModelParameters outParams =
new OutputModelParameters(
cmdLineOptions,
args);
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
PostProcessingCmdParameters postParams =
new PostProcessingCmdParameters(
cmdLineOptions,
args);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
if (!isEventLogGiven(cmdLineOptions, inputParams, systemParams)) {
System.exit(1);
}
MessagePrinter.configureLogging(systemParams.debugLevel);
logger.info("Loading log...");
LogParser logParser = MinerFulMinerLauncher.deriveLogParserFromLogFile(
inputParams,
minerFulParams);
TaskCharArchive taskCharArchive = logParser.getTaskCharArchive();
ProcessModel processModel = minerMinaStarter.mine(logParser, inputParams, minerFulParams, postParams, taskCharArchive);
new MinerFulOutputManagementLauncher().manageOutput(processModel, viewParams, outParams, systemParams, logParser);
}
public static boolean isEventLogGiven(Options cmdLineOptions, InputLogCmdParameters inputParams,
SystemCmdParameters systemParams) {
if (inputParams.inputLogFile == null) {
systemParams.printHelpForWrongUsage("Input log file missing! Please use the " +
InputLogCmdParameters.INPUT_LOGFILE_PATH_PARAM_NAME +
" option.",
cmdLineOptions);
return false;
}
return true;
}
public ProcessModel mine(LogParser logParser,
MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams, Character[] alphabet) {
return this.mine(logParser, null, minerFulParams, postParams, alphabet);
}
public ProcessModel mine(LogParser logParser,
InputLogCmdParameters inputParams, MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams, Character[] alphabet) {
TaskCharArchive taskCharArchive = new TaskCharArchive(alphabet);
return this.mine(logParser, inputParams, minerFulParams, postParams, taskCharArchive);
}
public ProcessModel mine(LogParser logParser,
MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams, TaskCharArchive taskCharArchive) {
return this.mine(logParser, null, minerFulParams, postParams, taskCharArchive);
}
public ProcessModel mine(LogParser logParser,
InputLogCmdParameters inputParams, MinerFulCmdParameters minerFulParams, PostProcessingCmdParameters postParams, TaskCharArchive taskCharArchive) {
// GlobalStatsTable globalStatsTable = new GlobalStatsTable(taskCharArchive, minerFulParams.branchingLimit);
// globalStatsTable = computeKB(logParser, minerFulParams,
// taskCharArchive, globalStatsTable);
System.gc();
ProcessModel proMod = ProcessModel.generateNonEvaluatedBinaryModel(taskCharArchive);
proMod.setName(makeDiscoveredProcessName(inputParams));
/* Substitution of mining core with the Janus reactiveMiner */
// proMod.bag = queryForConstraints(logParser, minerFulParams, postParams, taskCharArchive, globalStatsTable, proMod.bag); // MINERful
proMod.bag = reactiveQueryForConstraints(logParser, minerFulParams, postParams, taskCharArchive, null, proMod.bag);
// proMod.bag = reactiveOfflineQueryForConstraints(logParser, minerFulParams, postParams, taskCharArchive, globalStatsTable, proMod.bag);
System.gc();
		/* TODO: bring back the full post-processing and adapt it to the separation technique */
// pruneConstraints(proMod, minerFulParams, postParams);
new ReactiveMinerPruningCore(proMod, minerFulParams, postParams).pruneNonActiveConstraints();
return proMod;
}
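	/*
	 * Usage sketch (hypothetical, not part of the original code base; the
	 * input strings are illustrative assumptions and imports/qualified names
	 * are omitted):
	 *
	 * LogParser logParser = new StringLogParser(
	 *         new String[] { "abcab", "acb" }, ClassificationType.NAME);
	 * ProcessModel model = new JanusMinerStarter().mine(logParser, null,
	 *         new MinerFulCmdParameters(), new PostProcessingCmdParameters(),
	 *         logParser.getTaskCharArchive());
	 */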
public static String makeDiscoveredProcessName(InputLogCmdParameters inputParams) {
		return (inputParams != null && inputParams.inputLogFile != null) ?
String.format(JanusMinerStarter.PROCESS_MODEL_NAME_PATTERN, inputParams.inputLogFile.getName()) :
DEFAULT_ANONYMOUS_MODEL_NAME;
}
protected GlobalStatsTable computeKB(LogParser logParser,
MinerFulCmdParameters minerFulParams,
TaskCharArchive taskCharArchive, GlobalStatsTable globalStatsTable) {
int coreNum = 0;
long before = 0, after = 0;
if (minerFulParams.isParallelKbComputationRequired()) {
// Slice the log
List<LogParser> listOfLogParsers = logParser
.split(minerFulParams.kbParallelProcessingThreads);
List<MinerFulKBCore> listOfMinerFulCores = new ArrayList<MinerFulKBCore>(
minerFulParams.kbParallelProcessingThreads);
// Associate a dedicated KB-computing core to each log slice
for (LogParser slicedLogParser : listOfLogParsers) {
listOfMinerFulCores.add(new MinerFulKBCore(
coreNum++,
slicedLogParser,
minerFulParams, taskCharArchive));
}
ExecutorService executor = Executors
.newFixedThreadPool(minerFulParams.kbParallelProcessingThreads);
// ForkJoinPool executor = new ForkJoinPool(minerFulParams.kbParallelProcessingThreads);
try {
before = System.currentTimeMillis();
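				// Merge each slice's statistics table into the global one as the cores complete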
for (Future<GlobalStatsTable> statsTab : executor
.invokeAll(listOfMinerFulCores)) {
globalStatsTable.mergeAdditively(statsTab.get());
}
after = System.currentTimeMillis();
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
System.exit(1);
}
executor.shutdown();
} else {
MinerFulKBCore minerFulKbCore = new MinerFulKBCore(
coreNum++,
logParser,
minerFulParams, taskCharArchive);
before = System.currentTimeMillis();
globalStatsTable = minerFulKbCore.discover();
after = System.currentTimeMillis();
}
logger.info("Total KB construction time: " + (after - before));
return globalStatsTable;
}
private ConstraintsBag reactiveQueryForConstraints(
LogParser logParser, MinerFulCmdParameters minerFulParams,
			PostProcessingCmdParameters postParams, TaskCharArchive taskCharArchive,
GlobalStatsTable globalStatsTable, ConstraintsBag bag) {
int coreNum = 0;
long before, after = 0;
if (minerFulParams.isParallelQueryProcessingRequired() && minerFulParams.isBranchingRequired()) {
logger.warn("Parallel querying of branched constraints not yet implemented. Proceeding with the single-core operations...");
}
		/*
		 * JReactiveMiner Querying Core
		 *
		 * @author Alessio
		 */
ReactiveMinerQueryingCore minerFulQueryingCore = new ReactiveMinerQueryingCore(coreNum++,
				logParser, minerFulParams, postParams, taskCharArchive,
globalStatsTable, bag);
before = System.currentTimeMillis();
minerFulQueryingCore.discover();
after = System.currentTimeMillis();
logger.info("Total KB querying time: " + (after - before));
return bag;
}
} | 9,978 | 36.374532 | 150 | java |
Janus | Janus-master/src/minerful/JanusOfflineMinerStarter.java | package minerful;
import minerful.concept.ProcessModel;
import minerful.concept.TaskCharArchive;
import minerful.concept.constraint.ConstraintsBag;
import minerful.io.params.OutputModelParameters;
import minerful.logparser.LogParser;
import minerful.miner.core.MinerFulKBCore;
import minerful.miner.params.MinerFulCmdParameters;
import minerful.miner.stats.GlobalStatsTable;
import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.params.ViewCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.reactive.miner.ReactiveMinerOfflineQueryingCore;
import minerful.reactive.miner.ReactiveMinerPruningCore;
import minerful.reactive.params.JanusPrintParameters;
import minerful.utils.MessagePrinter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;
/**
 * Starts Janus offline mining from the command line
 */
public class JanusOfflineMinerStarter extends AbstractMinerFulStarter {
protected static final String PROCESS_MODEL_NAME_PATTERN = "Process model discovered from %s";
protected static final String DEFAULT_ANONYMOUS_MODEL_NAME = "Discovered process model";
private static MessagePrinter logger = MessagePrinter.getInstance(JanusOfflineMinerStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options minerfulOptions = MinerFulCmdParameters.parseableOptions(),
inputOptions = InputLogCmdParameters.parseableOptions(),
systemOptions = SystemCmdParameters.parseableOptions(),
viewOptions = ViewCmdParameters.parseableOptions(),
janusViewOptions = JanusPrintParameters.parseableOptions(),
outputOptions = OutputModelParameters.parseableOptions(),
                postProcOptions = PostProcessingCmdParameters.parseableOptions();
        for (Object opt : postProcOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : minerfulOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : inputOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : viewOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : janusViewOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : outputOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : systemOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
return cmdLineOptions;
}
    /**
     * @param args the command line arguments, parsed against the options
     *             defined in {@link #setupOptions()}
     */
public static void main(String[] args) {
JanusOfflineMinerStarter minerMinaStarter = new JanusOfflineMinerStarter();
Options cmdLineOptions = minerMinaStarter.setupOptions();
InputLogCmdParameters inputParams =
new InputLogCmdParameters(
cmdLineOptions,
args);
MinerFulCmdParameters minerFulParams =
new MinerFulCmdParameters(
cmdLineOptions,
args);
ViewCmdParameters viewParams =
new ViewCmdParameters(
cmdLineOptions,
args);
JanusPrintParameters janusPrintParameters =
new JanusPrintParameters(
cmdLineOptions,
args);
OutputModelParameters outParams =
new OutputModelParameters(
cmdLineOptions,
args);
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
PostProcessingCmdParameters postParams =
new PostProcessingCmdParameters(
cmdLineOptions,
args);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
if (!isEventLogGiven(cmdLineOptions, inputParams, systemParams)) {
System.exit(1);
}
MessagePrinter.configureLogging(systemParams.debugLevel);
logger.info("Loading log...");
LogParser logParser = MinerFulMinerLauncher.deriveLogParserFromLogFile(
inputParams,
minerFulParams);
TaskCharArchive taskCharArchive = logParser.getTaskCharArchive();
ProcessModel processModel = minerMinaStarter.mine(logParser, inputParams, minerFulParams, postParams, taskCharArchive, janusPrintParameters);
new MinerFulOutputManagementLauncher().manageOutput(processModel, viewParams, outParams, systemParams, logParser);
}
public static boolean isEventLogGiven(Options cmdLineOptions, InputLogCmdParameters inputParams,
SystemCmdParameters systemParams) {
if (inputParams.inputLogFile == null) {
systemParams.printHelpForWrongUsage("Input log file missing! Please use the " +
InputLogCmdParameters.INPUT_LOGFILE_PATH_PARAM_NAME +
" option.",
cmdLineOptions);
return false;
}
return true;
}
public ProcessModel mine(LogParser logParser,
MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams,
Character[] alphabet) {
return this.mine(logParser, null, minerFulParams, postParams, alphabet);
}
public ProcessModel mine(LogParser logParser,
InputLogCmdParameters inputParams,
MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams,
Character[] alphabet) {
TaskCharArchive taskCharArchive = new TaskCharArchive(alphabet);
return this.mine(logParser, inputParams, minerFulParams, postParams, taskCharArchive, new JanusPrintParameters());
}
public ProcessModel mine(LogParser logParser,
MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams,
TaskCharArchive taskCharArchive) {
return this.mine(logParser, null, minerFulParams, postParams, taskCharArchive, new JanusPrintParameters());
}
public ProcessModel mine(LogParser logParser,
InputLogCmdParameters inputParams,
MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams,
TaskCharArchive taskCharArchive,
JanusPrintParameters janusPrintParameters) {
        // The MINERful knowledge-base computation heuristics are not useful in Janus
// GlobalStatsTable globalStatsTable = new GlobalStatsTable(taskCharArchive, minerFulParams.branchingLimit);
// globalStatsTable = computeKB(logParser, minerFulParams,
// taskCharArchive, globalStatsTable);
System.gc();
ProcessModel proMod = ProcessModel.generateNonEvaluatedBinaryModel(taskCharArchive);
proMod.setName(makeDiscoveredProcessName(inputParams));
/* Substitution of mining core with the Janus reactiveMiner */
proMod.bag = reactiveOfflineQueryForConstraints(logParser, minerFulParams, postParams, taskCharArchive, null, proMod.bag, janusPrintParameters);
System.gc();
        /* TODO: bring back the full post-processing and adapt it to the separation technique */
// pruneConstraints(proMod, minerFulParams, postParams);
new ReactiveMinerPruningCore(proMod, minerFulParams, postParams).pruneNonActiveConstraints();
return proMod;
}
public ProcessModel mine(LogParser logParser, TaskCharArchive taskCharArchive) {
        return mine(logParser, taskCharArchive, 0.1, 0.8, new JanusPrintParameters()); // TODO warning: hard-coded default thresholds
}
public ProcessModel mine(LogParser logParser, TaskCharArchive taskCharArchive, double supportThreshold, double confidenceThreshold) {
return this.mine(logParser, taskCharArchive, supportThreshold, confidenceThreshold, new JanusPrintParameters());
}
public ProcessModel mine(LogParser logParser, TaskCharArchive taskCharArchive, double supportThreshold, double confidenceThreshold, JanusPrintParameters janusViewParams) {
System.gc();
ProcessModel proMod = ProcessModel.generateNonEvaluatedBinaryModel(taskCharArchive);
proMod.setName(logParser.toString());
MinerFulCmdParameters minerFulParams = new MinerFulCmdParameters();
PostProcessingCmdParameters postParams = new PostProcessingCmdParameters();
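        // Discovery thresholds driving the post-processing pruning of the returned model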
postParams.supportThreshold = supportThreshold;
postParams.confidenceThreshold = confidenceThreshold;
/* Substitution of mining core with the Janus reactiveMiner */
proMod.bag = reactiveOfflineQueryForConstraints(logParser, minerFulParams, postParams, taskCharArchive, null, proMod.bag, janusViewParams);
System.gc();
        /* TODO: bring back the full post-processing and adapt it to the separation technique */
// pruneConstraints(proMod, minerFulParams, postParams);
new ReactiveMinerPruningCore(proMod, minerFulParams, postParams).pruneNonActiveConstraints();
return proMod;
}
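    /*
     * Usage sketch (hypothetical; the threshold values are illustrative
     * assumptions):
     *
     * JanusOfflineMinerStarter starter = new JanusOfflineMinerStarter();
     * ProcessModel model = starter.mine(logParser,
     *         logParser.getTaskCharArchive(), 0.05, 0.9);
     */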
public static String makeDiscoveredProcessName(InputLogCmdParameters inputParams) {
return (inputParams != null && inputParams.inputLogFile != null) ?
String.format(JanusOfflineMinerStarter.PROCESS_MODEL_NAME_PATTERN, inputParams.inputLogFile.getName()) :
DEFAULT_ANONYMOUS_MODEL_NAME;
}
protected GlobalStatsTable computeKB(LogParser logParser,
MinerFulCmdParameters minerFulParams,
TaskCharArchive taskCharArchive, GlobalStatsTable globalStatsTable) {
int coreNum = 0;
long before = 0, after = 0;
if (minerFulParams.isParallelKbComputationRequired()) {
// Slice the log
List<LogParser> listOfLogParsers = logParser
.split(minerFulParams.kbParallelProcessingThreads);
List<MinerFulKBCore> listOfMinerFulCores = new ArrayList<MinerFulKBCore>(
minerFulParams.kbParallelProcessingThreads);
// Associate a dedicated KB-computing core to each log slice
for (LogParser slicedLogParser : listOfLogParsers) {
listOfMinerFulCores.add(new MinerFulKBCore(
coreNum++,
slicedLogParser,
minerFulParams, taskCharArchive));
}
ExecutorService executor = Executors
.newFixedThreadPool(minerFulParams.kbParallelProcessingThreads);
// ForkJoinPool executor = new ForkJoinPool(minerFulParams.kbParallelProcessingThreads);
try {
before = System.currentTimeMillis();
for (Future<GlobalStatsTable> statsTab : executor
.invokeAll(listOfMinerFulCores)) {
globalStatsTable.mergeAdditively(statsTab.get());
}
after = System.currentTimeMillis();
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
System.exit(1);
}
executor.shutdown();
} else {
MinerFulKBCore minerFulKbCore = new MinerFulKBCore(
coreNum++,
logParser,
minerFulParams, taskCharArchive);
before = System.currentTimeMillis();
globalStatsTable = minerFulKbCore.discover();
after = System.currentTimeMillis();
}
logger.info("Total KB construction time: " + (after - before));
return globalStatsTable;
}
private ConstraintsBag reactiveOfflineQueryForConstraints(LogParser logParser,
MinerFulCmdParameters minerFulParams,
                                                               PostProcessingCmdParameters postParams,
TaskCharArchive taskCharArchive,
GlobalStatsTable globalStatsTable,
ConstraintsBag bag, JanusPrintParameters janusViewParams) {
int coreNum = 0;
long before = 0, after = 0;
if (minerFulParams.isParallelQueryProcessingRequired() && minerFulParams.isBranchingRequired()) {
logger.warn("Parallel querying of branched constraints not yet implemented. Proceeding with the single-core operations...");
}
        /*
         * Janus Offline Querying Core
         *
         * @author Alessio
         */
ReactiveMinerOfflineQueryingCore minerFulQueryingCore = new ReactiveMinerOfflineQueryingCore(coreNum++,
                logParser, minerFulParams, postParams, taskCharArchive,
globalStatsTable, bag, janusViewParams);
before = System.currentTimeMillis();
minerFulQueryingCore.discover();
after = System.currentTimeMillis();
logger.info("Total KB querying time: " + (after - before));
return bag;
}
} | 14,060 | 44.504854 | 175 | java |
Janus | Janus-master/src/minerful/JanusVariantAnalysisLauncher.java | package minerful;
import minerful.concept.ProcessModel;
import minerful.concept.TaskCharArchive;
import minerful.io.ProcessModelLoader;
import minerful.io.params.OutputModelParameters;
import minerful.logparser.LogEventClassifier;
import minerful.logparser.LogParser;
import minerful.logparser.StringLogParser;
import minerful.logparser.XesLogParser;
import minerful.params.SystemCmdParameters;
import minerful.params.ViewCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.reactive.params.JanusVariantCmdParameters;
import minerful.reactive.params.JanusPrintParameters;
import minerful.reactive.variant.ReactiveVariantAnalysisCore;
import minerful.utils.MessagePrinter;
import java.io.File;
import java.util.Map;
/**
* Class for launching Janus variant analysis on two logs
*/
public class JanusVariantAnalysisLauncher {
public static MessagePrinter logger = MessagePrinter.getInstance(JanusVariantAnalysisLauncher.class);
private PostProcessingCmdParameters discoveryParams;
private JanusVariantCmdParameters janusParams;
private SystemCmdParameters systemParams;
private JanusPrintParameters janusViewParams;
private LogParser eventLog1;
private ProcessModel processSpecification1;
private Map<String, Float> measurementsSpecification1;
private LogParser eventLog2;
private ProcessModel processSpecification2;
private Map<String, Float> measurementsSpecification2;
public JanusVariantAnalysisLauncher(JanusVariantCmdParameters janusParams, SystemCmdParameters systemParams) {
this.janusParams = janusParams;
this.systemParams = systemParams;
logger.info("Loading event logs...");
this.eventLog1 = deriveLogParserFromLogFile(janusParams.inputLogLanguage1, janusParams.inputLogFile1, janusParams.eventClassification, null);
this.eventLog2 = deriveLogParserFromLogFile(janusParams.inputLogLanguage2, janusParams.inputLogFile2, janusParams.eventClassification, eventLog1.getTaskCharArchive());
        // Slightly redundant, but it ensures both parsers share the same alphabet: the first log is re-parsed with the second parser's task-char archive, which by now covers both alphabets
this.eventLog1 = deriveLogParserFromLogFile(janusParams.inputLogLanguage1, janusParams.inputLogFile1, janusParams.eventClassification, eventLog2.getTaskCharArchive());
if (janusParams.inputModelFile1 != null && janusParams.inputModelFile2 != null) {
logger.info("Loading process models...");
this.processSpecification1 = new ProcessModelLoader().loadProcessModel(janusParams.inputModelLanguage1, janusParams.inputModelFile1, eventLog2.getTaskCharArchive());
this.processSpecification1.bag.initAutomataBag();
this.processSpecification2 = new ProcessModelLoader().loadProcessModel(janusParams.inputModelLanguage2, janusParams.inputModelFile2, this.processSpecification1.getTaskCharArchive());
this.processSpecification2.bag.initAutomataBag();
            // Slightly redundant, but it ensures both models share the same alphabet: the first model is re-loaded with the second specification's task-char archive, which by now covers both alphabets
this.processSpecification1 = new ProcessModelLoader().loadProcessModel(janusParams.inputModelLanguage1, janusParams.inputModelFile1, this.processSpecification2.getTaskCharArchive());
this.processSpecification1.bag.initAutomataBag();
}
this.discoveryParams = new PostProcessingCmdParameters();
this.discoveryParams.confidenceThreshold = 0.8;
this.discoveryParams.supportThreshold = 0.1;
this.janusViewParams = new JanusPrintParameters();
}
public JanusVariantAnalysisLauncher(JanusVariantCmdParameters janusParams, SystemCmdParameters systemParams, PostProcessingCmdParameters discoveryParams) {
this(janusParams, systemParams);
this.discoveryParams = discoveryParams;
}
public JanusVariantAnalysisLauncher(JanusVariantCmdParameters janusParams, SystemCmdParameters systemParams, PostProcessingCmdParameters discoveryParams, JanusPrintParameters janusViewParams) {
this(janusParams, systemParams, discoveryParams);
this.janusViewParams = janusViewParams;
}
    /**
     * Returns the log parser for a given input log.
     *
     * @param inputLanguage       file format of the input event log
     * @param inputLogFile        path to the input event log file
     * @param eventClassification strategy used to classify the log events
     * @param taskCharArchive     task-character archive to reuse for event encoding (may be null)
     * @return LogParser of the input log
     */
public static LogParser deriveLogParserFromLogFile(JanusVariantCmdParameters.LogInputEncoding inputLanguage, File inputLogFile, JanusVariantCmdParameters.EventClassification eventClassification, TaskCharArchive taskCharArchive) {
LogParser logParser = null;
switch (inputLanguage) {
case xes:
case mxml:
LogEventClassifier.ClassificationType evtClassi = fromInputParamToXesLogClassificationType(eventClassification);
try {
logParser = new XesLogParser(inputLogFile, evtClassi, taskCharArchive);
} catch (Exception e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
// Let us try to free memory from the unused XesDecoder!
System.gc();
break;
case strings:
try {
logParser = new StringLogParser(inputLogFile, LogEventClassifier.ClassificationType.NAME, taskCharArchive);
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
break;
default:
throw new UnsupportedOperationException("This encoding (" + inputLanguage + ") is not yet supported");
}
return logParser;
}
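    /*
     * Usage sketch (hypothetical; the file name is an illustrative
     * assumption):
     *
     * LogParser parser = deriveLogParserFromLogFile(
     *         JanusVariantCmdParameters.LogInputEncoding.xes,
     *         new File("variant1.xes"),
     *         JanusVariantCmdParameters.EventClassification.name,
     *         null); // no pre-existing task-char archive to reuse
     */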
    /**
     * Returns the log event classification type corresponding to a given
     * event classification input parameter.
     *
     * @param evtClassInputParam the event classification given as input parameter
     * @return the corresponding log event classification type
     */
public static LogEventClassifier.ClassificationType fromInputParamToXesLogClassificationType(JanusVariantCmdParameters.EventClassification evtClassInputParam) {
switch (evtClassInputParam) {
case name:
return LogEventClassifier.ClassificationType.NAME;
case logspec:
return LogEventClassifier.ClassificationType.LOG_SPECIFIED;
default:
throw new UnsupportedOperationException("Classification strategy " + evtClassInputParam + " not yet implemented");
}
}
public ProcessModel getProcessSpecification1() {
return processSpecification1;
}
public void setProcessSpecification1(ProcessModel processSpecification1) {
this.processSpecification1 = processSpecification1;
}
public ProcessModel getProcessSpecification2() {
return processSpecification2;
}
public void setProcessSpecification2(ProcessModel processSpecification2) {
this.processSpecification2 = processSpecification2;
}
public TaskCharArchive getAlphabetDecoder() {
return eventLog2.getTaskCharArchive();
}
    /**
     * Analyses the differences between the DECLARE rules of the two input
     * logs, with statistical guarantees.
     *
     * @return the outcome of the variant analysis, as computed by the Janus variant analysis core
     */
public Map<String, Float> checkVariants() {
// 1. Rules discovery for each log
if (processSpecification1 == null || processSpecification2 == null) {
logger.info("Models discovery for input variants");
double before = System.currentTimeMillis();
ViewCmdParameters viewParams = new ViewCmdParameters();
viewParams.suppressScreenPrintOut = true;
// Variant 1 discovery
TaskCharArchive taskCharArchive1 = eventLog1.getTaskCharArchive();
JanusOfflineMinerStarter minerMinaStarter = new JanusOfflineMinerStarter();
processSpecification1 = minerMinaStarter.mine(eventLog1, taskCharArchive1, discoveryParams.supportThreshold, discoveryParams.confidenceThreshold, janusViewParams);
// Variant 1 discovered model (optional) output
OutputModelParameters model1params = new OutputModelParameters();
model1params.fileToSaveAsJSON = janusParams.fileToSaveModel1AsJSON;
model1params.fileToSaveConstraintsAsCSV = janusParams.fileToSaveModel1AsCSV;
new MinerFulOutputManagementLauncher().manageOutput(processSpecification1, viewParams, model1params, systemParams, eventLog1);
// variant 2 discovery
TaskCharArchive taskCharArchive2 = eventLog2.getTaskCharArchive();
minerMinaStarter = new JanusOfflineMinerStarter();
processSpecification2 = minerMinaStarter.mine(eventLog2, taskCharArchive2, discoveryParams.supportThreshold, discoveryParams.confidenceThreshold, janusViewParams);
// Variant 2 discovered model (optional) output
OutputModelParameters model2params = new OutputModelParameters();
model2params.fileToSaveAsJSON = janusParams.fileToSaveModel2AsJSON;
model2params.fileToSaveConstraintsAsCSV = janusParams.fileToSaveModel2AsCSV;
new MinerFulOutputManagementLauncher().manageOutput(processSpecification2, viewParams, model2params, systemParams, eventLog2);
double after = System.currentTimeMillis();
logger.info("Variants constraints discovery time: " + (after - before));
}
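        // 2. Statistical comparison of the two variants through the Janus variant analysis core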
ReactiveVariantAnalysisCore variantAnalysisCore = new ReactiveVariantAnalysisCore(
eventLog1, processSpecification1, eventLog2, processSpecification2, janusParams, janusViewParams
);
Map<String, Float> result = variantAnalysisCore.check();
storeOriginalVariantsResults(variantAnalysisCore);
return result;
}
    /**
     * Stores the measurements of the original variants for external access
     * via the respective getters.
     *
     * @param variantAnalysisCore the core holding the computed measurements
     */
private void storeOriginalVariantsResults(ReactiveVariantAnalysisCore variantAnalysisCore) {
measurementsSpecification1 = variantAnalysisCore.getMeasurementsVar1(true);
measurementsSpecification2 = variantAnalysisCore.getMeasurementsVar2(true);
}
public Map<String, Float> getMeasurementsSpecification1() {
return measurementsSpecification1;
}
public Map<String, Float> getMeasurementsSpecification2() {
return measurementsSpecification2;
}
}
| 10,633 | 46.686099 | 233 | java |
Janus | Janus-master/src/minerful/JanusVariantAnalysisStarter.java | package minerful;
import minerful.concept.TaskCharArchive;
//import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.reactive.io.JanusVariantOutputManagementLauncher;
import minerful.reactive.params.JanusVariantCmdParameters;
import minerful.reactive.params.JanusPrintParameters;
import minerful.utils.MessagePrinter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import java.util.Map;
/**
 * Class to start Janus variant analysis from the terminal
 */
public class JanusVariantAnalysisStarter extends MinerFulMinerStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(JanusVariantAnalysisStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options systemOptions = SystemCmdParameters.parseableOptions(),
// outputOptions = OutputModelParameters.parseableOptions(),
postPrOptions = PostProcessingCmdParameters.parseableOptions(),
// viewOptions = ViewCmdParameters.parseableOptions(),
janusViewOptions = JanusPrintParameters.parseableOptions(),
// chkOptions = CheckingCmdParameters.parseableOptions(),
// inputLogOptions = InputLogCmdParameters.parseableOptions(),
// inpuModlOptions = InputModelParameters.parseableOptions(),
janusOptions = JanusVariantCmdParameters.parseableOptions();
for (Object opt : systemOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
// for (Object opt : outputOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
for (Object opt : postPrOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
// for (Object opt : viewOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
// for (Object opt : chkOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
// for (Object opt : inputLogOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
// for (Object opt : inpuModlOptions.getOptions()) {
// cmdLineOptions.addOption((Option) opt);
// }
for (Object opt : janusViewOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : janusOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
return cmdLineOptions;
}
public static void main(String[] args) {
JanusVariantAnalysisStarter checkStarter = new JanusVariantAnalysisStarter();
Options cmdLineOptions = checkStarter.setupOptions();
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
// OutputModelParameters outParams =
// new OutputModelParameters(
// cmdLineOptions,
// args);
PostProcessingCmdParameters preProcParams =
new PostProcessingCmdParameters(
cmdLineOptions,
args);
// CheckingCmdParameters chkParams =
// new CheckingCmdParameters(
// cmdLineOptions,
// args);
// InputLogCmdParameters inputLogParams =
// new InputLogCmdParameters(
// cmdLineOptions,
// args);
// InputModelParameters inpuModlParams =
// new InputModelParameters(
// cmdLineOptions,
// args);
// ViewCmdParameters viewParams =
// new ViewCmdParameters(
// cmdLineOptions,
// args);
JanusPrintParameters janusViewParams =
new JanusPrintParameters(
cmdLineOptions,
args);
JanusVariantCmdParameters janusParams =
new JanusVariantCmdParameters(
cmdLineOptions,
args);
MessagePrinter.configureLogging(systemParams.debugLevel);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
double execTimeStart = System.currentTimeMillis();
JanusVariantAnalysisLauncher variantAnalysis = new JanusVariantAnalysisLauncher(janusParams, systemParams, preProcParams, janusViewParams);
Map<String, Float> result = variantAnalysis.checkVariants();
TaskCharArchive alphabet = variantAnalysis.getAlphabetDecoder();
new JanusVariantOutputManagementLauncher().manageVariantOutput(result, janusParams, janusViewParams, systemParams, alphabet, variantAnalysis.getMeasurementsSpecification1(), variantAnalysis.getMeasurementsSpecification2());
double execTimeEnd = System.currentTimeMillis();
logger.info("Total execution time: " + (execTimeEnd - execTimeStart));
}
}
| 5,277 | 40.559055 | 231 | java |
Janus | Janus-master/src/minerful/MinerFulErrorInjectedLogMakerStarter.java | package minerful;
import minerful.io.params.InputModelParameters;
import minerful.logmaker.errorinjector.ErrorInjector;
import minerful.logmaker.errorinjector.ErrorInjectorFactory;
import minerful.logmaker.errorinjector.params.ErrorInjectorCmdParameters;
import minerful.logmaker.params.LogMakerParameters;
import minerful.logparser.LogParser;
import minerful.logparser.LogTraceParser;
import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.stringsmaker.MinerFulStringTracesMaker;
import minerful.stringsmaker.params.StringTracesMakerCmdParameters;
import minerful.utils.MessagePrinter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
public class MinerFulErrorInjectedLogMakerStarter extends MinerFulMinerStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulErrorInjectedLogMakerStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options systemOptions = SystemCmdParameters.parseableOptions(),
inputModelOptions = InputModelParameters.parseableOptions(),
logMakOptions = LogMakerParameters.parseableOptions(),
errorInjectorOptions = ErrorInjectorCmdParameters.parseableOptions(),
inputLogOptions = InputLogCmdParameters.parseableOptions();
for (Object opt : systemOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : inputModelOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : logMakOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : errorInjectorOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
for (Object opt : inputLogOptions.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
return cmdLineOptions;
}
public static String[] injectErrors(String[] testBedArray, Character[] alphabet, ErrorInjectorCmdParameters errorInjexParams) {
ErrorInjectorFactory errorInjexFactory = new ErrorInjectorFactory();
ErrorInjector errorInjex = errorInjexFactory.createErrorInjector(
errorInjexParams.getErrorInjectionSpreadingPolicy(),
errorInjexParams.getErrorType(),
testBedArray);
		errorInjex.setAlphabet(alphabet); // TODO: take the alphabet from the InputModel options, not from the traces maker
errorInjex.setErrorsInjectionPercentage(errorInjexParams.getErrorsInjectionPercentage());
if (errorInjexParams.isTargetCharDefined())
errorInjex.setTargetChar(errorInjexParams.getTargetChar());
logger.trace(
(
"\n\n"
+ "Error injection spreading policy: " + errorInjexParams.getErrorInjectionSpreadingPolicy() + "\n"
+ "Error injection type: " + errorInjexParams.getErrorType() + "\n"
+ "Error injection percentage: " + errorInjexParams.getErrorsInjectionPercentage() + "\n"
+ "Target character: " + errorInjexParams.getTargetChar()
).replaceAll("\n", "\n\t")
);
testBedArray = errorInjex.injectErrors();
if (errorInjexParams.logFile != null) {
StringBuffer tracesBuffer = new StringBuffer();
FileWriter fileWri = null;
try {
fileWri = new FileWriter(errorInjexParams.logFile);
} catch (IOException e) {
logger.error("File writing error", e);
}
for (int i = 0; i < testBedArray.length; i++) {
tracesBuffer.append(testBedArray[i] + "\n");
}
if (tracesBuffer.length() > 0) {
try {
fileWri.write(tracesBuffer.toString());
fileWri.flush();
} catch (IOException e) {
logger.error("File writing error", e);
}
logger.info("Error-injected log file stored in: " + errorInjexParams.logFile.getAbsolutePath());
}
}
return testBedArray;
}
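	/*
	 * Sketch of a direct error-injector use, bypassing the command-line
	 * parameters (hypothetical; the spreading policy, error type, and
	 * percentage value are assumptions — see ErrorInjectorFactory for the
	 * supported values):
	 *
	 * ErrorInjector injector = new ErrorInjectorFactory()
	 *         .createErrorInjector(policy, type, traces);
	 * injector.setAlphabet(alphabet);
	 * injector.setErrorsInjectionPercentage(10.0);
	 * String[] noisyTraces = injector.injectErrors();
	 */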
public static void main(String[] args) {
MinerFulErrorInjectedLogMakerStarter logMakerStarter = new MinerFulErrorInjectedLogMakerStarter();
Options cmdLineOptions = logMakerStarter.setupOptions();
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
InputModelParameters inputModelParams =
new InputModelParameters(
cmdLineOptions,
args);
LogMakerParameters logMakParameters =
new LogMakerParameters(
cmdLineOptions,
args);
ErrorInjectorCmdParameters errorInjexParams =
new ErrorInjectorCmdParameters(
cmdLineOptions,
args);
InputLogCmdParameters inputLogParams =
new InputLogCmdParameters(
cmdLineOptions,
args);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
if (inputModelParams.inputFile == null) {
systemParams.printHelpForWrongUsage("Input process model file missing!");
System.exit(1);
}
MessagePrinter.configureLogging(systemParams.debugLevel);
		String[] testBedArray = new String[0];
		if (inputLogParams.inputLogFile == null) {
			testBedArray = new MinerFulLogMakerLauncher(inputModelParams, logMakParameters, systemParams).makeLog();
		} else {
			logger.info("Reading input log");
			List<String> temp = new LinkedList<String>();
			LogParser logParser = MinerFulMinerLauncher.deriveLogParserFromLogFile(inputLogParams);
			for (Iterator<LogTraceParser> it = logParser.traceIterator(); it.hasNext(); ) {
				LogTraceParser tr = it.next();
				tr.init();
				temp.add(tr.printStringTrace());
				// System.out.println(tr.printStringTrace());
			}
			testBedArray = temp.toArray(new String[temp.size()]);
}
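		// Derive the alphabet from the events actually observed in the traces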
Set<Character> tempAlphabet = new HashSet<Character>();
		for (String trace : testBedArray) {
for (char event : trace.toCharArray()) {
tempAlphabet.add(event);
}
}
		Character[] alphabet = tempAlphabet.toArray(new Character[tempAlphabet.size()]);
testBedArray = injectErrors(testBedArray, alphabet, errorInjexParams);
logger.debug(
"\n"
+ "[Testbed after error injection]");
for (int i = 0; i < testBedArray.length; i++) {
logger.debug(String.format("%0" + (int) (Math.ceil(Math.log10(testBedArray.length))) + "d", (i)) + ")\t" + testBedArray[i]);
}
}
} | 7,481 | 39.885246 | 136 | java |
Janus | Janus-master/src/minerful/MinerFulErrorInjectedSimuStarter.java | package minerful;
import minerful.concept.ProcessModel;
import minerful.io.params.OutputModelParameters;
import minerful.logmaker.errorinjector.params.ErrorInjectorCmdParameters;
import minerful.logparser.LogEventClassifier.ClassificationType;
import minerful.logparser.LogParser;
import minerful.logparser.StringLogParser;
import minerful.miner.params.MinerFulCmdParameters;
import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.params.ViewCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.stringsmaker.MinerFulStringTracesMaker;
import minerful.stringsmaker.params.StringTracesMakerCmdParameters;
import minerful.utils.MessagePrinter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
public class MinerFulErrorInjectedSimuStarter extends MinerFulSimuStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulErrorInjectedSimuStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options systemOptions = SystemCmdParameters.parseableOptions(),
minerfulOptions = MinerFulCmdParameters.parseableOptions(),
tracesMakerOptions = StringTracesMakerCmdParameters.parseableOptions(),
errorInjectorOptions = ErrorInjectorCmdParameters.parseableOptions(),
viewOptions = ViewCmdParameters.parseableOptions(),
outputOptions = OutputModelParameters.parseableOptions(),
				postProcOptions = PostProcessingCmdParameters.parseableOptions();
		for (Object opt: postProcOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: systemOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: minerfulOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: tracesMakerOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: viewOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: outputOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: errorInjectorOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
return cmdLineOptions;
}
/**
* @param args the command line arguments:
* [regular expression]
* [number of strings]
* [minimum number of characters per string]
* [maximum number of characters per string]
* [alphabet]...
*/
public static void main(String[] args) {
MinerFulErrorInjectedSimuStarter minErrSimuSta = new MinerFulErrorInjectedSimuStarter();
Options cmdLineOptions = minErrSimuSta.setupOptions();
ViewCmdParameters viewParams =
new ViewCmdParameters(
cmdLineOptions,
args);
StringTracesMakerCmdParameters tracesMakParams =
new StringTracesMakerCmdParameters(
cmdLineOptions,
args);
MinerFulCmdParameters minerFulParams =
new MinerFulCmdParameters(
cmdLineOptions,
args);
ErrorInjectorCmdParameters errorInjexParams =
new ErrorInjectorCmdParameters(
cmdLineOptions,
args);
OutputModelParameters outParams =
new OutputModelParameters(
cmdLineOptions,
args);
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
PostProcessingCmdParameters postParams = new PostProcessingCmdParameters(cmdLineOptions, args);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
MessagePrinter.configureLogging(systemParams.debugLevel);
String[] testBedArray = new MinerFulStringTracesMaker().makeTraces(tracesMakParams);
testBedArray = MinerFulErrorInjectedTracesMakerStarter.injectErrors(testBedArray, tracesMakParams, errorInjexParams);
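		// Mine a declarative model back from the error-injected traces and render the output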
try {
LogParser stringLogParser = new StringLogParser(testBedArray, ClassificationType.NAME);
// minerSimuStarter.mine(testBedArray, minerFulParams, tracesMakParams, systemParams);
ProcessModel processModel = new MinerFulMinerStarter().mine(stringLogParser, minerFulParams, postParams, tracesMakParams.alphabet);
MinerFulOutputManagementLauncher proViewLauncher = new MinerFulOutputManagementLauncher();
proViewLauncher.manageOutput(processModel, viewParams, outParams, systemParams, stringLogParser);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
System.exit(1);
}
}
} | 4,758 | 37.379032 | 140 | java |
Janus | Janus-master/src/minerful/MinerFulErrorInjectedTracesMakerStarter.java | package minerful;
import java.io.FileWriter;
import java.io.IOException;
import minerful.logmaker.errorinjector.ErrorInjector;
import minerful.logmaker.errorinjector.ErrorInjectorFactory;
import minerful.logmaker.errorinjector.params.ErrorInjectorCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.stringsmaker.MinerFulStringTracesMaker;
import minerful.stringsmaker.params.StringTracesMakerCmdParameters;
import minerful.utils.MessagePrinter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
public class MinerFulErrorInjectedTracesMakerStarter extends AbstractMinerFulStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulErrorInjectedTracesMakerStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options systemOptions = SystemCmdParameters.parseableOptions(),
tracesMakOptions = StringTracesMakerCmdParameters.parseableOptions(),
errorInjectorOptions = ErrorInjectorCmdParameters.parseableOptions();
for (Object opt: systemOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: tracesMakOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: errorInjectorOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
return cmdLineOptions;
}
public static String[] injectErrors(String[] testBedArray, StringTracesMakerCmdParameters tracesMakParams, ErrorInjectorCmdParameters errorInjexParams) {
ErrorInjectorFactory errorInjexFactory = new ErrorInjectorFactory();
ErrorInjector errorInjex = errorInjexFactory.createErrorInjector(
errorInjexParams.getErrorInjectionSpreadingPolicy(),
errorInjexParams.getErrorType(),
testBedArray);
errorInjex.setAlphabet(tracesMakParams.alphabet);
errorInjex.setErrorsInjectionPercentage(errorInjexParams.getErrorsInjectionPercentage());
if (errorInjexParams.isTargetCharDefined())
errorInjex.setTargetChar(errorInjexParams.getTargetChar());
logger.trace(
(
"\n\n"
+ "Error injection spreading policy: " + errorInjexParams.getErrorInjectionSpreadingPolicy() + "\n"
+ "Error injection type: " + errorInjexParams.getErrorType() + "\n"
+ "Error injection percentage: " + errorInjexParams.getErrorsInjectionPercentage() + "\n"
+ "Target character: " + errorInjexParams.getTargetChar()
).replaceAll("\n", "\n\t")
);
testBedArray = errorInjex.injectErrors();
if (errorInjexParams.logFile != null) {
StringBuffer tracesBuffer = new StringBuffer();
FileWriter fileWri = null;
try {
fileWri = new FileWriter(errorInjexParams.logFile);
} catch (IOException e) {
logger.error("File writing error", e);
}
for (int i = 0; i < testBedArray.length; i++) {
tracesBuffer.append(testBedArray[i] + "\n");
}
if (tracesBuffer.length() > 0) {
try {
fileWri.write(tracesBuffer.toString());
fileWri.flush();
} catch (IOException e) {
logger.error("File writing error", e);
}
logger.info("Error-injected log file stored in: " + errorInjexParams.logFile.getAbsolutePath());
}
}
return testBedArray;
}
public static void main(String[] args) {
MinerFulErrorInjectedTracesMakerStarter minErrTraMaker = new MinerFulErrorInjectedTracesMakerStarter();
Options cmdLineOptions = minErrTraMaker.setupOptions();
StringTracesMakerCmdParameters tracesMakParams =
new StringTracesMakerCmdParameters(
cmdLineOptions,
args);
ErrorInjectorCmdParameters errorInjexParams =
new ErrorInjectorCmdParameters(
cmdLineOptions,
args);
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
MessagePrinter.configureLogging(systemParams.debugLevel);
String[] testBedArray = new MinerFulStringTracesMaker().makeTraces(tracesMakParams);
testBedArray = injectErrors(testBedArray, tracesMakParams, errorInjexParams);
logger.debug(
"\n"
+ "[Testbed after error injection]");
for (int i = 0; i < testBedArray.length; i++) {
logger.debug(String.format("%0" + (int)(Math.ceil(Math.log10(testBedArray.length))) + "d", (i)) + ")\t" + testBedArray[i]);
}
}
} | 4,904 | 37.622047 | 154 | java |
Janus | Janus-master/src/minerful/MinerFulFitnessCheckLauncher.java | package minerful;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import org.processmining.plugins.declareminer.visualizing.AssignmentModel;
import minerful.checking.ProcessSpecificationFitnessEvaluator;
import minerful.checking.params.CheckingCmdParameters;
import minerful.checking.relevance.dao.ModelFitnessEvaluation;
import minerful.concept.ProcessModel;
import minerful.io.ProcessModelLoader;
import minerful.io.params.InputModelParameters;
import minerful.logparser.LogParser;
import minerful.logparser.LogTraceParser;
import minerful.miner.core.MinerFulPruningCore;
import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.utils.MessagePrinter;
public class MinerFulFitnessCheckLauncher {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulFitnessCheckLauncher.class);
private ProcessModel processSpecification;
private LogParser eventLogParser;
private CheckingCmdParameters chkParams;
private MinerFulFitnessCheckLauncher(CheckingCmdParameters chkParams) {
this.chkParams = chkParams;
}
public MinerFulFitnessCheckLauncher(AssignmentModel declareMapModel, LogParser inputLog, CheckingCmdParameters chkParams) {
this(chkParams);
this.processSpecification = new ProcessModelLoader().loadProcessModel(declareMapModel);
this.eventLogParser = inputLog;
}
public MinerFulFitnessCheckLauncher(ProcessModel minerFulProcessModel, LogParser inputLog, CheckingCmdParameters chkParams) {
this(chkParams);
this.processSpecification = minerFulProcessModel;
this.eventLogParser = inputLog;
}
public MinerFulFitnessCheckLauncher(InputModelParameters inputParams, PostProcessingCmdParameters preProcParams,
InputLogCmdParameters inputLogParams, CheckingCmdParameters chkParams, SystemCmdParameters systemParams) {
this(chkParams);
if (inputParams.inputFile == null) {
systemParams.printHelpForWrongUsage("Input process model file missing!");
System.exit(1);
}
// Load the process specification from the file
this.processSpecification =
new ProcessModelLoader().loadProcessModel(inputParams.inputLanguage, inputParams.inputFile);
// Apply some preliminary pruning
MinerFulPruningCore pruniCore = new MinerFulPruningCore(this.processSpecification, preProcParams);
this.processSpecification.bag = pruniCore.massageConstraints();
this.eventLogParser = MinerFulMinerLauncher.deriveLogParserFromLogFile(inputLogParams);
		// Note that the merging of the event log's TaskChar encoding with the given model's one happens only later (at checking time)
MessagePrinter.configureLogging(systemParams.debugLevel);
}
public ProcessModel getProcessSpecification() {
return processSpecification;
}
public LogParser getEventLogParser() {
return eventLogParser;
}
public ModelFitnessEvaluation check() {
ProcessSpecificationFitnessEvaluator evalor = new ProcessSpecificationFitnessEvaluator(
this.eventLogParser.getEventEncoderDecoder(), this.processSpecification);
ModelFitnessEvaluation evalon = evalor.evaluateOnLog(this.eventLogParser);
reportOnEvaluation(evalon);
return evalon;
}
public ModelFitnessEvaluation check(LogTraceParser trace) {
ProcessSpecificationFitnessEvaluator evalor = new ProcessSpecificationFitnessEvaluator(
this.eventLogParser.getEventEncoderDecoder(), this.processSpecification);
ModelFitnessEvaluation evalon = evalor.evaluateOnTrace(trace);
reportOnEvaluation(evalon);
return evalon;
}
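	/*
	 * Usage sketch (hypothetical; construction of the process model, log
	 * parser, and checking parameters is elided):
	 *
	 * MinerFulFitnessCheckLauncher launcher =
	 *         new MinerFulFitnessCheckLauncher(processModel, logParser, chkParams);
	 * ModelFitnessEvaluation evaluation = launcher.check();
	 * boolean fit = evaluation.isFullyFitting();
	 */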
private static String printFitnessJsonSummary(ModelFitnessEvaluation evalon) {
return "{\"Avg.fitness\":"
				+ MessagePrinter.formatFloatNumForCSV(evalon.avgFitness()) + "," // "," rather than ";" keeps the summary valid JSON
+ "\"Trace-fit-ratio\":"
+ MessagePrinter.formatFloatNumForCSV(evalon.traceFitRatio())
+ "}";
}
private void reportOnEvaluation(ModelFitnessEvaluation evalon) {
if (evalon.isFullyFitting()) {
logger.info("Yay! The passed declarative process specification is fully fitting with the input traces! Summary:\n"
+ printFitnessJsonSummary(evalon) + "\n");
} else {
logger.warn(
"The passed declarative process specification is not fully fitting with the input traces. Summary:\n"
+ printFitnessJsonSummary(evalon) + "\n"
+ ((chkParams.fileToSaveResultsAsCSV == null) ?
"See below for further details." :
"See " + chkParams.fileToSaveResultsAsCSV.getAbsolutePath() + " for further details.")
);
}
if (chkParams.fileToSaveResultsAsCSV != null) {
logger.info("Saving results in CSV format as " + chkParams.fileToSaveResultsAsCSV + "...");
PrintWriter outWriter = null;
try {
outWriter = new PrintWriter(chkParams.fileToSaveResultsAsCSV);
outWriter.print(evalon.printCSV());
outWriter.flush();
outWriter.close();
} catch (FileNotFoundException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
} | 5,041 | 36.626866 | 131 | java |
Janus | Janus-master/src/minerful/MinerFulFitnessCheckStarter.java | package minerful;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import minerful.checking.params.CheckingCmdParameters;
import minerful.checking.relevance.dao.ModelFitnessEvaluation;
import minerful.concept.ProcessModel;
import minerful.io.params.InputModelParameters;
import minerful.io.params.OutputModelParameters;
import minerful.params.InputLogCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.params.ViewCmdParameters;
import minerful.utils.MessagePrinter;
public class MinerFulFitnessCheckStarter extends MinerFulMinerStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulFitnessCheckStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options systemOptions = SystemCmdParameters.parseableOptions(),
outputOptions = OutputModelParameters.parseableOptions(),
postPrOptions = PostProcessingCmdParameters.parseableOptions(),
viewOptions = ViewCmdParameters.parseableOptions(),
chkOptions = CheckingCmdParameters.parseableOptions(),
inputLogOptions = InputLogCmdParameters.parseableOptions(),
inpuModlOptions = InputModelParameters.parseableOptions();
for (Object opt: systemOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: outputOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: postPrOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: viewOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: chkOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: inputLogOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
for (Object opt: inpuModlOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
return cmdLineOptions;
}
public static void main(String[] args) {
MinerFulFitnessCheckStarter checkStarter = new MinerFulFitnessCheckStarter();
Options cmdLineOptions = checkStarter.setupOptions();
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
OutputModelParameters outParams =
new OutputModelParameters(
cmdLineOptions,
args);
PostProcessingCmdParameters preProcParams =
new PostProcessingCmdParameters(
cmdLineOptions,
args);
CheckingCmdParameters chkParams =
new CheckingCmdParameters(
cmdLineOptions,
args);
InputLogCmdParameters inputLogParams =
new InputLogCmdParameters(
cmdLineOptions,
args);
InputModelParameters inpuModlParams =
new InputModelParameters(
cmdLineOptions,
args);
ViewCmdParameters viewParams =
new ViewCmdParameters(
cmdLineOptions,
args);
MessagePrinter.configureLogging(systemParams.debugLevel);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
MinerFulFitnessCheckLauncher miFuCheLa = new MinerFulFitnessCheckLauncher(inpuModlParams, preProcParams, inputLogParams, chkParams, systemParams);
ModelFitnessEvaluation evaluationOutput = miFuCheLa.check();
ProcessModel processModel = miFuCheLa.getProcessSpecification();
new MinerFulOutputManagementLauncher().manageOutput(processModel, viewParams, outParams, systemParams);
}
} | 3,808 | 33.944954 | 154 | java |
Janus | Janus-master/src/minerful/MinerFulLogMakerLauncher.java | package minerful;
import java.io.IOException;
import org.processmining.plugins.declareminer.visualizing.AssignmentModel;
import minerful.concept.ProcessModel;
import minerful.io.ProcessModelLoader;
import minerful.io.params.InputModelParameters;
import minerful.logmaker.MinerFulLogMaker;
import minerful.logmaker.params.LogMakerParameters;
import minerful.params.SystemCmdParameters;
import minerful.utils.MessagePrinter;
/**
* Launches the generation of event logs from declarative process specifications.
*/
public class MinerFulLogMakerLauncher {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulLogMakerLauncher.class);
private ProcessModel inputProcess;
private LogMakerParameters logMakParams;
private MinerFulLogMakerLauncher(LogMakerParameters logMakParams) {
this.logMakParams = logMakParams;
}
public MinerFulLogMakerLauncher(AssignmentModel declareMapModel, LogMakerParameters logMakParams) {
this(logMakParams);
this.inputProcess = new ProcessModelLoader().loadProcessModel(declareMapModel);
}
public MinerFulLogMakerLauncher(ProcessModel minerFulProcessModel, LogMakerParameters logMakParams) {
this(logMakParams);
this.inputProcess = minerFulProcessModel;
}
public MinerFulLogMakerLauncher(InputModelParameters inputParams,
LogMakerParameters logMakParams, SystemCmdParameters systemParams) {
this(logMakParams);
// Check the input file before attempting to load the model from it
if (inputParams.inputFile == null) {
systemParams.printHelpForWrongUsage("Input process model file missing!");
System.exit(1);
}
this.inputProcess = new ProcessModelLoader().loadProcessModel(inputParams.inputLanguage, inputParams.inputFile);
MessagePrinter.configureLogging(systemParams.debugLevel);
}
public String[] makeLog() {
if (this.logMakParams.outputLogFile == null) {
throw new IllegalArgumentException("Output file for log storage not specified!");
}
/*
* Creates the log.
*/
MinerFulLogMaker logMak = new MinerFulLogMaker(logMakParams);
logMak.createLog(this.inputProcess);
try {
logMak.storeLog();
} catch (IOException e) {
// Storing the log to file failed: report it, but still return the in-memory traces below
e.printStackTrace();
}
return logMak.getCleanStringsLog();
}
} | 2,192 | 28.635135 | 114 | java |
Janus | Janus-master/src/minerful/MinerFulLogMakerStarter.java | package minerful;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import minerful.io.params.InputModelParameters;
import minerful.logmaker.params.LogMakerParameters;
import minerful.params.SystemCmdParameters;
import minerful.utils.MessagePrinter;
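/**
* Command-line front-end for event-log generation: gathers the options of
* {@link SystemCmdParameters}, {@link InputModelParameters} and
* {@link LogMakerParameters}, then delegates to {@link MinerFulLogMakerLauncher}.
*/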
public class MinerFulLogMakerStarter extends MinerFulMinerStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulLogMakerStarter.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = new Options();
Options systemOptions = SystemCmdParameters.parseableOptions(),
inputOptions = InputModelParameters.parseableOptions(),
logMakOptions = LogMakerParameters.parseableOptions();
for (Options optionSet : new Options[] { systemOptions, inputOptions, logMakOptions }) {
for (Object opt : optionSet.getOptions()) {
cmdLineOptions.addOption((Option) opt);
}
}
return cmdLineOptions;
}
public static void main(String[] args) {
MinerFulLogMakerStarter logMakerStarter = new MinerFulLogMakerStarter();
Options cmdLineOptions = logMakerStarter.setupOptions();
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
InputModelParameters inputParams =
new InputModelParameters(
cmdLineOptions,
args);
LogMakerParameters logMakParameters =
new LogMakerParameters(
cmdLineOptions,
args);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
if (inputParams.inputFile == null) {
systemParams.printHelpForWrongUsage("Input process model file missing!");
System.exit(1);
}
MessagePrinter.configureLogging(systemParams.debugLevel);
new MinerFulLogMakerLauncher(inputParams, logMakParameters, systemParams).makeLog();
}
} | 2,138 | 29.557143 | 97 | java |
Janus | Janus-master/src/minerful/MinerFulMinerLauncher.java | package minerful;
import org.deckfour.xes.model.XLog;
import org.processmining.plugins.declareminer.visualizing.DeclareMap;
import minerful.concept.ProcessModel;
import minerful.concept.TaskCharArchive;
import minerful.io.encdec.declaremap.DeclareMapEncoderDecoder;
import minerful.io.params.OutputModelParameters;
import minerful.logparser.LogEventClassifier.ClassificationType;
import minerful.logparser.LogParser;
import minerful.logparser.StringLogParser;
import minerful.logparser.XesLogParser;
import minerful.miner.params.MinerFulCmdParameters;
import minerful.params.InputLogCmdParameters;
import minerful.params.InputLogCmdParameters.EventClassification;
import minerful.params.SystemCmdParameters;
import minerful.params.ViewCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.utils.MessagePrinter;
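/**
* Programmatic entry point for declarative-model discovery: it wires together
* the input parameters, the log parser and the MinerFul mining core.
*
* <p>A minimal usage sketch (the parameter objects are assumed to be parsed
* or configured beforehand):</p>
* <pre>{@code
* MinerFulMinerLauncher launcher =
*     new MinerFulMinerLauncher(inputParams, minerFulParams, postParams, systemParams);
* ProcessModel model = launcher.mine();
* }</pre>
*/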
public class MinerFulMinerLauncher {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulMinerLauncher.class);
private InputLogCmdParameters inputParams;
private MinerFulCmdParameters minerFulParams;
private SystemCmdParameters systemParams;
private PostProcessingCmdParameters postParams;
private MinerFulMinerStarter minerFulStarter;
private LogParser logParser;
private ViewCmdParameters viewParams;
private OutputModelParameters outParams;
public MinerFulMinerLauncher(InputLogCmdParameters inputParams,
MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams, SystemCmdParameters systemParams) {
this(inputParams, minerFulParams, postParams, systemParams, null, null);
}
public MinerFulMinerLauncher(InputLogCmdParameters inputParams,
MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams, SystemCmdParameters systemParams,
ViewCmdParameters viewParams, OutputModelParameters outParams) {
this.inputParams = inputParams;
this.minerFulParams = minerFulParams;
this.systemParams = systemParams;
this.postParams = postParams;
this.viewParams = viewParams;
this.outParams = outParams;
this.minerFulStarter = new MinerFulMinerStarter();
MessagePrinter.configureLogging(systemParams.debugLevel);
}
public ProcessModel mine() {
if (inputParams.inputLogFile == null) {
MessagePrinter.printlnError("Missing input file");
System.exit(1);
}
logger.info("Loading log...");
logParser = MinerFulMinerLauncher.deriveLogParserFromLogFile(inputParams, minerFulParams);
TaskCharArchive taskCharArchive = logParser.getTaskCharArchive();
return minerFulStarter.mine(logParser, inputParams, minerFulParams, postParams, taskCharArchive);
}
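/** Mines a declarative model from an already-loaded XES log, skipping file parsing. */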
public ProcessModel mine(XLog xLog) {
ClassificationType classiType = fromInputParamToXesLogClassificationType(this.inputParams.eventClassification);
logParser = new XesLogParser(xLog, classiType);
return minerFulStarter.mine(logParser, inputParams, minerFulParams, postParams, logParser.getTaskCharArchive());
}
public DeclareMap mineDeclareMap(XLog xLog) {
return new DeclareMapEncoderDecoder(mine(xLog)).createDeclareMap();
}
public static ClassificationType fromInputParamToXesLogClassificationType(EventClassification evtClassInputParam) {
switch (evtClassInputParam) {
case name:
return ClassificationType.NAME;
case logspec:
return ClassificationType.LOG_SPECIFIED;
default:
throw new UnsupportedOperationException("Classification strategy " + evtClassInputParam + " not yet implemented");
}
}
public static LogParser deriveLogParserFromLogFile(InputLogCmdParameters inputParams) {
return deriveLogParserFromLogFile(inputParams, null);
}
public static LogParser deriveLogParserFromLogFile(InputLogCmdParameters inputParams, MinerFulCmdParameters minerFulParams) {
LogParser logParser = null;
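// A sub-log is requested whenever a non-default starting trace or sub-log length is given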
boolean doAnalyseSubLog =
!inputParams.startFromTrace.equals(InputLogCmdParameters.FIRST_TRACE_NUM)
||
!inputParams.subLogLength.equals(InputLogCmdParameters.WHOLE_LOG_LENGTH);
switch (inputParams.inputLanguage) {
case xes:
case mxml:
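// MXML logs are handled by the same parser infrastructure as XES ones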
ClassificationType evtClassi = MinerFulMinerLauncher.fromInputParamToXesLogClassificationType(inputParams.eventClassification);
try {
if (doAnalyseSubLog) {
logParser = new XesLogParser(inputParams.inputLogFile, evtClassi, inputParams.startFromTrace, inputParams.subLogLength, null);
} else {
logParser = new XesLogParser(inputParams.inputLogFile, evtClassi);
}
} catch (Exception e1) {
// Parsing failures are only reported; logParser remains null in that case
e1.printStackTrace();
}
// Remove from the analysed alphabet those activities that are
// specified in a user-defined list
if (minerFulParams != null && minerFulParams.activitiesToExcludeFromResult != null && minerFulParams.activitiesToExcludeFromResult.size() > 0) {
logParser.excludeTasksByName(minerFulParams.activitiesToExcludeFromResult);
}
// Let us try to free memory from the unused XesDecoder!
System.gc();
break;
case strings:
try {
if (doAnalyseSubLog) {
logParser = new StringLogParser(inputParams.inputLogFile, ClassificationType.NAME, inputParams.startFromTrace, inputParams.subLogLength, null);
} else {
logParser = new StringLogParser(inputParams.inputLogFile, ClassificationType.NAME);
}
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
break;
default:
throw new UnsupportedOperationException("This encoding ("
+ inputParams.inputLanguage + ") is not yet supported");
}
return logParser;
}
} | 5,542 | 38.035211 | 148 | java |
Janus | Janus-master/src/minerful/MinerFulMinerSlider.java | package minerful;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import minerful.concept.ProcessModel;
import minerful.concept.TaskCharArchive;
import minerful.io.ConstraintsPrinter;
import minerful.io.params.OutputModelParameters;
import minerful.logparser.LogParser;
import minerful.miner.core.MinerFulKBCore;
import minerful.miner.core.MinerFulQueryingCore;
import minerful.miner.params.MinerFulCmdParameters;
import minerful.miner.stats.GlobalStatsTable;
import minerful.params.InputLogCmdParameters;
import minerful.params.SlidingCmdParameters;
import minerful.params.SystemCmdParameters;
import minerful.params.ViewCmdParameters;
import minerful.postprocessing.params.PostProcessingCmdParameters;
import minerful.utils.MessagePrinter;
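/**
* Variant of the miner that slides a window over the input log, re-mining the
* constraints at every step and printing the intermediate measures as CSV
* (see {@link SlidingCmdParameters}).
*/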
public class MinerFulMinerSlider extends MinerFulMinerStarter {
public static MessagePrinter logger = MessagePrinter.getInstance(MinerFulMinerSlider.class);
@Override
public Options setupOptions() {
Options cmdLineOptions = super.setupOptions();
Options slidingOptions = SlidingCmdParameters.parseableOptions();
for (Object opt: slidingOptions.getOptions()) {
cmdLineOptions.addOption((Option)opt);
}
return cmdLineOptions;
}
public static void main(String[] args) {
MinerFulMinerSlider minerSlider = new MinerFulMinerSlider();
Options cmdLineOptions = minerSlider.setupOptions();
SlidingCmdParameters slideParams =
new SlidingCmdParameters(
cmdLineOptions,
args);
InputLogCmdParameters inputParams =
new InputLogCmdParameters(
cmdLineOptions,
args);
MinerFulCmdParameters minerFulParams =
new MinerFulCmdParameters(
cmdLineOptions,
args);
ViewCmdParameters viewParams =
new ViewCmdParameters(
cmdLineOptions,
args);
OutputModelParameters outParams =
new OutputModelParameters(
cmdLineOptions,
args);
SystemCmdParameters systemParams =
new SystemCmdParameters(
cmdLineOptions,
args);
PostProcessingCmdParameters postParams =
new PostProcessingCmdParameters(
cmdLineOptions,
args);
if (systemParams.help) {
systemParams.printHelp(cmdLineOptions);
System.exit(0);
}
if (inputParams.inputLogFile == null) {
systemParams.printHelpForWrongUsage("Input log file missing!",
cmdLineOptions);
System.exit(1);
}
MessagePrinter.configureLogging(systemParams.debugLevel);
logger.info("Loading log...");
LogParser logParser = MinerFulMinerLauncher.deriveLogParserFromLogFile(inputParams,
minerFulParams);
TaskCharArchive taskCharArchive = logParser.getTaskCharArchive();
MinerFulOutputManagementLauncher minerFulOutputMgr = new MinerFulOutputManagementLauncher();
ProcessModel processModel = minerSlider.slideAndMine(logParser, slideParams, inputParams, minerFulParams, postParams, taskCharArchive, minerFulOutputMgr, viewParams, outParams, systemParams);
}
public ProcessModel slideAndMine(LogParser logParser, SlidingCmdParameters slideParams,
InputLogCmdParameters inputParams, MinerFulCmdParameters minerFulParams,
PostProcessingCmdParameters postParams, TaskCharArchive taskCharArchive,
MinerFulOutputManagementLauncher minerFulOutputMgr, ViewCmdParameters viewParams,
OutputModelParameters outParams, SystemCmdParameters systemParams) {
PostProcessingCmdParameters noPostProcParams = PostProcessingCmdParameters.makeParametersForNoPostProcessing();
ProcessModel proMod = null;
int from = 0, to = 0;
proMod = ProcessModel.generateNonEvaluatedBinaryModel(taskCharArchive);
proMod.setName(makeDiscoveredProcessName(inputParams));
GlobalStatsTable
statsTable = new GlobalStatsTable(taskCharArchive, minerFulParams.branchingLimit),
globalStatsTable = null;
if (!slideParams.stickTail) {
globalStatsTable = new GlobalStatsTable(taskCharArchive, minerFulParams.branchingLimit);
}
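// First pass: mine the initial window in full before starting to slide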
LogParser slicedLogParser = logParser.takeASlice(inputParams.startFromTrace, inputParams.subLogLength);
statsTable = computeKB(slicedLogParser, minerFulParams,
taskCharArchive, statsTable);
if (!slideParams.stickTail) {
globalStatsTable.mergeAdditively(statsTable);
}
proMod.bag = queryForConstraints(slicedLogParser, minerFulParams,
noPostProcParams,
taskCharArchive, statsTable, proMod.bag);
int step = slideParams.slidingStep;
GlobalStatsTable slicedStatsTable = null;
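// The KB core and the querying core are created once and re-parameterised at every slide step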
MinerFulKBCore kbCore = new MinerFulKBCore(
0,
slicedLogParser,
minerFulParams, taskCharArchive);
MinerFulQueryingCore qCore = new MinerFulQueryingCore(0,
logParser, minerFulParams, noPostProcParams, taskCharArchive,
statsTable, proMod.bag);
ConstraintsPrinter cPrin = new ConstraintsPrinter(proMod);
from = inputParams.startFromTrace;
to = inputParams.startFromTrace + slicedLogParser.length();
PrintWriter outWriter = setUpCSVPrintWriter(slideParams);
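// Header row: prefix each line of the machine-readable listing with the window
// bounds, then blank them out on the header line itself (see note below)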
outWriter.println(
"'From';'To';" +
cPrin.printBagAsMachineReadable(false,false,true).replace(
"\n", "\n" + from + ";" + to + ";").replace(
from + ";" + to + ";'",";;'"));
// The last one is to avoid, e.g., “0;297;'Support';'Confidence'” in the header
minerFulOutputMgr.setAdditionalFileSuffix(String.format("-%06d-%06d", from, to));
minerFulOutputMgr.manageOutput(proMod, viewParams, outParams, systemParams, logParser);
if (slideParams.slidingStep > 0) {
//
// In the ASCII picture below, every column is a trace.
// The '=' symbols denote the original sub-log.
// The length of the shift is denoted with '>'
// The '-' symbol denotes the traces for which entries have to be removed from the KB
// The '+' symbol indicates the traces for which entries have to be added to the KB
//
// ========>>>> === +++
// ---- ++++ --- +++
// ---- ++++ --- +++
// ---- ++++ --- +++
// ---- ++++ --- +++
// ========>>>> === >>>
//
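// subtraLen: traces leaving the window; addiStartGap: offset from the current
// start to the first incoming trace; addiLen: traces entering the window.
// E.g., with subLogLength = 4 and step = 2: subtraLen = 2, addiStartGap = 4, addiLen = 2.
//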
int
subtraLen = Math.min(step, inputParams.subLogLength),
addiStartGap = (step < inputParams.subLogLength ? inputParams.subLogLength : step),
addiLen = Math.min(step, inputParams.subLogLength);
for (int i = 0; inputParams.startFromTrace + i + addiStartGap + addiLen <= logParser.wholeLength(); i += step) {
if (!slideParams.stickTail) {
slicedLogParser = logParser.takeASlice(
inputParams.startFromTrace + i,
subtraLen
);
kbCore.setLogParser(slicedLogParser);
slicedStatsTable = kbCore.discover();
// subtract the tail
statsTable.mergeSubtractively(slicedStatsTable);
}
slicedLogParser = logParser.takeASlice(inputParams.startFromTrace + i + addiStartGap, addiLen);
kbCore.setLogParser(slicedLogParser);
slicedStatsTable = kbCore.discover();
// add the head
statsTable.mergeAdditively(slicedStatsTable);
if (!slideParams.stickTail) {
globalStatsTable.mergeAdditively(slicedStatsTable);
}
// wipe out existing constraints
proMod.bag.wipeOutConstraints();
// query the altered knowledge base!
qCore.discover();
from = inputParams.startFromTrace + i + step;
to = inputParams.startFromTrace + i + addiStartGap + addiLen;
outWriter.println(
from + ";" +
to + ";" +
cPrin.printBagAsMachineReadable(false,false,false)
);
minerFulOutputMgr.setAdditionalFileSuffix(String.format("-%06d-%06d", from, to));
minerFulOutputMgr.manageOutput(proMod, viewParams, outParams, systemParams, logParser);
}
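// With a moving tail, query once more over the statistics accumulated
// across all windows to obtain the final model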
if (!slideParams.stickTail) {
proMod.bag.wipeOutConstraints();
qCore.setStatsTable(globalStatsTable);
qCore.discover();
}
}
outWriter.flush();
outWriter.close();
super.pruneConstraints(proMod, minerFulParams, postParams);
return proMod;
}
public PrintWriter setUpCSVPrintWriter(SlidingCmdParameters slideParams) {
PrintWriter outWriter = null;
try {
outWriter = new PrintWriter(slideParams.intermediateOutputCsvFile);
} catch (FileNotFoundException e) {
// Fall back to standard output when the CSV file cannot be created
e.printStackTrace();
logger.warn("Redirecting intermediate model measures to standard output");
outWriter = new PrintWriter(System.out);
}
return outWriter;
}
} | 8,424 | 33.958506 | 385 | java |