repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
api
|
api-master/api/Parameters.py
|
from collections import defaultdict
from Helpers import *
import requests
import json
import time
def _nested_dict():
    """Arbitrarily deep defaultdict used to build Elasticsearch query fragments."""
    return defaultdict(_nested_dict)


def _epoch_from_param(value):
    """Resolve an 'after'/'before' parameter to an absolute UTC epoch.

    Accepts a plain integer epoch, or a relative spec like "30d", "12h",
    "90m", "45s" (that many units before now).  Unrecognized values are
    passed through unchanged (original behavior).
    NOTE(review): a non-numeric pass-through will likely be rejected by ES.
    """
    if LooksLikeInt(value):
        return int(value)
    unit_seconds = {"d": 86400, "h": 3600, "m": 60, "s": 1}
    unit = value[-1:].lower()
    if unit in unit_seconds:
        return int(time.time()) - (int(value[:-1]) * unit_seconds[unit])
    return value


def _numeric_clause(field, value):
    """Build an ES clause for "<N" / ">N" / exact-N numeric params.

    Returns None when the value matches none of the three forms (the
    original code appended an empty, invalid clause in that case).
    """
    clause = _nested_dict()
    if value[:1] == "<":
        clause['range'][field]['lt'] = int(value[1:])
    elif value[:1] == ">":
        clause['range'][field]['gt'] = int(value[1:])
    elif LooksLikeInt(value):
        clause['term'][field] = int(value)
    else:
        return None
    return clause


def process(params, q):
    """Translate HTTP request parameters into an Elasticsearch query body.

    :param params: raw request parameter dict (keys treated case-insensitively)
    :param q: nested defaultdict with q['query']['bool']['filter'] pre-set to a list
    :return: (params, q) — params normalized (lowercased keys, resolved
             epochs, defaults filled in) and q with filters/sort/size applied
    """
    params = {k.lower(): v for k, v in params.items()}  # lowercase all parameter names
    suggested_sort = "desc"

    # Exact-term filters; accept either a single value or a list of values.
    for condition in ("subreddit", "author"):
        if params.get(condition) is not None:
            values = params[condition]
            if not isinstance(values, (list, tuple)):
                values = [values]
            params[condition] = [v.lower() for v in values]
            terms = _nested_dict()
            terms['terms'][condition] = params[condition]
            q['query']['bool']['filter'].append(terms)

    # created_utc window.  A lower bound ('after') flips the default sort
    # to ascending so results stream forward in time.
    for key, op in (("after", "gt"), ("before", "lt")):
        if key in params and params[key] is not None:
            params[key] = _epoch_from_param(params[key])
            window = _nested_dict()
            window['range']['created_utc'][op] = params[key]
            q['query']['bool']['filter'].append(window)
            if key == "after":
                suggested_sort = "asc"
        else:
            params[key] = None

    # score / num_comments accept "<N", ">N" or an exact integer.
    for field in ("score", "num_comments"):
        if field in params and params[field] is not None:
            clause = _numeric_clause(field, params[field])
            if clause is not None:  # fix: skip unparseable values instead of appending {}
                q['query']['bool']['filter'].append(clause)

    # Boolean flags arrive as strings; only recognized spellings filter.
    for condition in ("over_18", "is_video", "stickied", "spoiler", "locked", "contest_mode"):
        if params.get(condition) is not None:
            value = params[condition]
            flag = _nested_dict()
            if value.lower() == 'true' or value == "1":
                flag['term'][condition] = "true"
            elif value.lower() == 'false' or value == "0":
                flag['term'][condition] = "false"
            else:
                continue  # fix: unrecognized value no longer appends an empty clause
            q['query']['bool']['filter'].append(flag)

    # Sort field defaults to creation time.
    if params.get('sort_type') is not None:
        params['sort_type'] = params['sort_type'].lower()
    else:
        params['sort_type'] = "created_utc"

    # 'limit' is an alias for 'size'; size is clamped to 500, default 25.
    if 'limit' in params:
        params['size'] = params['limit']
    if params.get('size') is not None and LooksLikeInt(params['size']):
        params['size'] = q['size'] = min(int(params['size']), 500)
    else:
        params['size'] = q['size'] = 25

    # 'order' is an alias for 'sort'; fall back to the suggested direction.
    if params.get('order') is not None:
        params['sort'] = params['order'].lower()
    if params.get('sort') is not None:
        params['sort'] = params['sort'].lower()
    else:
        params['sort'] = suggested_sort
    q['sort'][params['sort_type']] = params['sort']

    # Histogram interval for date aggregations; anything else becomes None.
    # (fix: a present-but-None frequency no longer raises AttributeError)
    if params.get('frequency') is not None and params['frequency'].lower() in ('second', 'minute', 'hour', 'day', 'week', 'month'):
        params['frequency'] = params['frequency'].lower()
    else:
        params['frequency'] = None
    return params, q
| 5,197 | 43.050847 | 112 |
py
|
api
|
api-master/api/api.py
|
#!/usr/bin/env python3
# Pushshift-style Reddit search API entry point: builds the Falcon app and
# wires URL routes to the resource classes defined in the sibling modules.
import falcon
import json
import requests
import time
import html
import DBFunctions
import psycopg2
import math
import redis
from pprint import pprint
from inspect import getmembers
from collections import defaultdict
import Submission
import Comment
import User
import Parameters
from Helpers import *
from configparser import ConfigParser
api = falcon.API()
# Comment search is reachable under three aliases for backward compatibility;
# the bare /reddit/search defaults to comments.
api.add_route('/reddit/search', Comment.search())
api.add_route('/reddit/comment/search', Comment.search())
api.add_route('/reddit/search/comment', Comment.search())
# Submission search has two aliases.
api.add_route('/reddit/search/submission', Submission.search())
api.add_route('/reddit/submission/search', Submission.search())
# Per-author activity aggregation.
api.add_route('/reddit/analyze/user/{author}', User.Analyze())
# Comment-id listing for a single submission (two aliases).
api.add_route('/get/comment_ids/{submission_id}', Submission.getCommentIDs())
api.add_route('/reddit/submission/comment_ids/{submission_id}', Submission.getCommentIDs())
| 916 | 25.970588 | 91 |
py
|
api
|
api-master/api/Comment.py
|
import time
import html
from collections import defaultdict
import Parameters
from Helpers import *
import DBFunctions
class search:
    """Falcon resource backing the comment-search routes.

    Dispatches either to a direct Postgres lookup (when explicit comment
    ids are supplied) or to an Elasticsearch full-text search.
    """
    params = None  # request parameters of the current GET (set per request)

    def on_get(self, req, resp):
        """Handle GET: run the search and emit an indented JSON payload."""
        start = time.time()
        self.params = req.params
        if 'ids' in self.params:
            # Explicit id lookup bypasses Elasticsearch entirely.
            data = self.getIds(self.params['ids'])
        else:
            data = self.doElasticSearch()
        end = time.time()
        data["metadata"]["execution_time_milliseconds"] = round((end - start) * 1000, 2)
        data["metadata"]["version"] = "v3.0"
        resp.cache_control = ["public", "max-age=2", "s-maxage=2"]
        resp.body = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))

    def getIds(self, ids):
        """Fetch comments by base-36 id directly from Postgres.

        Accepts a single id or a list; a leading "t1_" prefix is stripped.
        """
        if not isinstance(ids, (list, tuple)):
            ids = [ids]
        ids_to_get_from_db = []
        for id in ids:
            id = id.lower()
            if id[:3] == "t1_":
                id = id[3:]
            ids_to_get_from_db.append(base36decode(id))
        rows = DBFunctions.pgdb.execute("SELECT * FROM comment WHERE (json->>'id')::bigint IN %s LIMIT 5000", tuple(ids_to_get_from_db))
        results = []
        data = {}
        if rows:
            for row in rows:
                comment = row[0]
                # Reconstruct reddit "thing" prefixes (t1_/t3_/t5_) that are
                # stored stripped, in base 10, in the database.
                comment['id'] = base36encode(comment['id'])
                if comment.get('parent_id') is None or comment['parent_id'] == comment['link_id']:
                    # Top-level comment: parent is the submission itself.
                    comment['parent_id'] = "t3_" + base36encode(comment['link_id'])
                else:
                    comment['parent_id'] = "t1_" + base36encode(comment['parent_id'])
                if 'subreddit_id' in comment:
                    comment['subreddit_id'] = "t5_" + base36encode(comment['subreddit_id'])
                comment['link_id'] = "t3_" + base36encode(comment['link_id'])
                comment.pop('name', None)
                results.append(comment)
        data["data"] = results
        data["metadata"] = {}
        return data

    def doElasticSearch(self):
        """Run the ES search and reshape hits/aggregations for the API."""
        response = self.search("http://mars:9200/rc/comments/_search")
        results = []
        data = {}
        for hit in response["data"]["hits"]["hits"]:
            source = hit["_source"]
            source["id"] = base36encode(int(hit["_id"]))
            source["link_id"] = "t3_" + base36encode(source["link_id"])
            if 'parent_id' in source:
                source["parent_id"] = "t1_" + base36encode(source["parent_id"])
            else:
                # Top-level comment: parent is the submission.
                source["parent_id"] = source["link_id"]
            source["subreddit_id"] = "t5_" + base36encode(source["subreddit_id"])
            if 'author_flair_text' in source:
                source["author_flair_text"] = html.unescape(source["author_flair_text"])
            else:
                source["author_flair_text"] = None
            if 'author_flair_css_class' in source:
                source["author_flair_css_class"] = html.unescape(source["author_flair_css_class"])
            else:
                source["author_flair_css_class"] = None
            if 'fields' in self.params:
                # Restrict returned keys to the requested (case-insensitive) fields.
                if isinstance(self.params['fields'], str):
                    self.params['fields'] = [self.params['fields']]
                self.params['fields'] = [x.lower() for x in self.params['fields']]
                for key in list(source):
                    if key.lower() not in self.params['fields']:
                        source.pop(key, None)
            results.append(source)
        if 'aggregations' in response["data"]:
            data["aggs"] = {}
            if 'subreddit' in response["data"]["aggregations"]:
                # significant_terms: score = foreground share vs background.
                for bucket in response["data"]["aggregations"]["subreddit"]["buckets"]:
                    bucket["score"] = bucket["doc_count"] / bucket["bg_count"]
                newlist = sorted(response["data"]["aggregations"]["subreddit"]["buckets"], key=lambda k: k['score'], reverse=True)
                data["aggs"]["subreddit"] = newlist
            if 'author' in response["data"]["aggregations"]:
                for bucket in response["data"]["aggregations"]["author"]["buckets"]:
                    if 'score' in bucket:
                        bucket["score"] = bucket["doc_count"] / bucket["bg_count"]
                data["aggs"]["author"] = response["data"]["aggregations"]["author"]["buckets"]
            if 'created_utc' in response["data"]["aggregations"]:
                for bucket in response["data"]["aggregations"]["created_utc"]["buckets"]:
                    bucket.pop('key_as_string', None)
                    bucket["key"] = int(bucket["key"] / 1000)  # ES ms -> epoch seconds
                data["aggs"]["created_utc"] = response["data"]["aggregations"]["created_utc"]["buckets"]
            if 'link_id' in response["data"]["aggregations"]:
                # Enrich link_id buckets with their submission documents.
                ids = []
                for bucket in response["data"]["aggregations"]["link_id"]["buckets"]:
                    if 'score' in bucket:
                        bucket["score"] = bucket["doc_count"] / bucket["bg_count"]
                    ids.append(bucket["key"])
                submission_data = getSubmissionsFromES(ids)
                newlist = []
                after = 0
                if "after" in self.params:
                    after = int(self.params["after"])
                for item in response["data"]["aggregations"]["link_id"]["buckets"]:
                    # Keep only buckets whose submission exists and falls
                    # inside the requested time window.
                    if item["key"] in submission_data and submission_data[item["key"]]["created_utc"] > after:
                        item["data"] = submission_data[item["key"]]
                        item["data"]["full_link"] = "https://www.reddit.com" + item["data"]["permalink"]
                        newlist.append(item)
                data["aggs"]["link_id"] = newlist
        data["data"] = results
        data["metadata"] = response["metadata"]
        data["metadata"]["results_returned"] = len(response["data"]["hits"]["hits"])
        data["metadata"]["timed_out"] = response["data"]["timed_out"]
        data["metadata"]["total_results"] = response["data"]["hits"]["total"]
        data["metadata"]["shards"] = response["data"]["_shards"]
        return data

    def search(self, uri):
        """Build the ES query body from self.params and execute it.

        Falls back to the secondary ES node when the primary is unreachable.
        (The *uri* argument is currently unused; the node URLs are fixed.)
        """
        nested_dict = lambda: defaultdict(nested_dict)
        q = nested_dict()
        q['query']['bool']['filter'] = []
        if 'q' in self.params and self.params['q'] is not None:
            sqs = nested_dict()
            sqs['simple_query_string']['query'] = self.params['q']
            sqs['simple_query_string']['fields'] = ['body']
            sqs['simple_query_string']['default_operator'] = 'and'
            q['query']['bool']['filter'].append(sqs)
        self.params, q = Parameters.process(self.params, q)
        min_doc_count = 0
        if 'min_doc_count' in self.params and self.params['min_doc_count'] is not None and LooksLikeInt(self.params['min_doc_count']):
            # Fix: the original read the bare name `params`, which is
            # undefined in this scope and raised NameError.
            min_doc_count = int(self.params['min_doc_count'])
        if 'aggs' in self.params:
            if isinstance(self.params['aggs'], str):
                self.params['aggs'] = [self.params['aggs']]
            for agg in list(self.params['aggs']):
                if agg.lower() == 'subreddit':
                    q['aggs']['subreddit']['significant_terms']['field'] = "subreddit.keyword"
                    q['aggs']['subreddit']['significant_terms']['size'] = 1000
                    q['aggs']['subreddit']['significant_terms']['script_heuristic']['script']['lang'] = "painless"
                    q['aggs']['subreddit']['significant_terms']['script_heuristic']['script']['inline'] = "params._subset_freq"
                    q['aggs']['subreddit']['significant_terms']['min_doc_count'] = min_doc_count
                if agg.lower() == 'author':
                    q['aggs']['author']['terms']['field'] = 'author.keyword'
                    q['aggs']['author']['terms']['size'] = 1000
                    q['aggs']['author']['terms']['order']['_count'] = 'desc'
                if agg.lower() == 'created_utc':
                    q['aggs']['created_utc']['date_histogram']['field'] = "created_utc"
                    if self.params['frequency'] is None:
                        self.params['frequency'] = "day"
                    q['aggs']['created_utc']['date_histogram']['interval'] = self.params['frequency']
                    q['aggs']['created_utc']['date_histogram']['order']['_key'] = "asc"
                if agg.lower() == 'link_id':
                    q['aggs']['link_id']['terms']['field'] = "link_id"
                    q['aggs']['link_id']['terms']['size'] = 250
                    q['aggs']['link_id']['terms']['order']['_count'] = "desc"
        response = None
        try:
            response = requests.get("http://mars:9200/rc/comments/_search", data=json.dumps(q))
        except requests.exceptions.RequestException as e:
            # Primary node unreachable: retry against the secondary node.
            response = requests.get("http://jupiter:9200/rc/comments/_search", data=json.dumps(q))
        results = {}
        results['data'] = json.loads(response.text)
        results['metadata'] = {}
        results['metadata']['size'] = self.params['size']
        results['metadata']['sort'] = self.params['sort']
        results['metadata']['sort_type'] = self.params['sort_type']
        if 'after' in self.params and self.params['after'] is not None:
            results['metadata']['after'] = self.params['after']
        return results
| 9,637 | 46.245098 | 135 |
py
|
api
|
api-master/api/Submission.py
|
import time
import html
from collections import defaultdict
import Parameters
from Helpers import *
class search:
    """Falcon resource backing the submission-search routes.

    Either fetches submissions by explicit id or runs an Elasticsearch
    query built from the request parameters.
    """
    params = None  # request parameters of the current GET (set per request)

    def on_get(self, req, resp):
        """Handle GET: run the search and emit an indented JSON payload."""
        self.start = time.time()
        self.params = req.params
        if 'ids' in self.params:
            # Explicit id lookup: skip the query builder entirely.
            data = self.getIds(self.params['ids'])
            end = time.time()
            data["metadata"] = {}
            data["metadata"]["execution_time_milliseconds"] = round((end - self.start) * 1000, 2)
            data["metadata"]["version"] = "v3.0"
            resp.cache_control = ["public", "max-age=2", "s-maxage=2"]
            resp.body = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
            return
        response = self.search("http://mars:9200/rs/submissions/_search")
        results = []
        data = {}
        for hit in response["data"]["hits"]["hits"]:
            source = hit["_source"]
            # ES stores ids in base 10; the public API speaks base 36.
            source["id"] = base36encode(int(hit["_id"]))
            if 'subreddit_id' in source and source["subreddit_id"] is not None and LooksLikeInt(source["subreddit_id"]):
                source["subreddit_id"] = "t5_" + base36encode(source["subreddit_id"])
            else:
                source["subreddit_id"] = None
            if 'author_flair_text' in source:
                source["author_flair_text"] = html.unescape(source["author_flair_text"])
            else:
                source["author_flair_text"] = None
            if 'author_flair_css_class' in source:
                source["author_flair_css_class"] = html.unescape(source["author_flair_css_class"])
            else:
                source["author_flair_css_class"] = None
            if source["permalink"]:
                source["full_link"] = "https://www.reddit.com" + source["permalink"]
            if 'fields' in self.params:
                # Restrict returned keys to the requested (case-insensitive) fields.
                if isinstance(self.params['fields'], str):
                    self.params['fields'] = [self.params['fields']]
                self.params['fields'] = [x.lower() for x in self.params['fields']]
                for key in list(source):
                    if key.lower() not in self.params['fields']:
                        source.pop(key, None)
            results.append(source)
        if 'aggregations' in response["data"]:
            data['aggs'] = {}
            if 'subreddit' in response['data']['aggregations']:
                # significant_terms: score = foreground share vs background.
                for bucket in response['data']['aggregations']['subreddit']['buckets']:
                    bucket['score'] = round(bucket['doc_count'] / bucket['bg_count'], 5)
                newlist = sorted(response['data']['aggregations']['subreddit']['buckets'], key=lambda k: k['score'], reverse=True)
                data['aggs']['subreddit'] = newlist
            if 'author' in response['data']['aggregations']:
                for bucket in response['data']['aggregations']['author']['buckets']:
                    if 'score' in bucket:
                        bucket['score'] = bucket['doc_count'] / bucket['bg_count']
                data['aggs']['author'] = response['data']['aggregations']['author']['buckets']
            if 'created_utc' in response['data']['aggregations']:
                for bucket in response['data']['aggregations']['created_utc']['buckets']:
                    bucket.pop('key_as_string', None)
                    bucket['key'] = int(bucket['key'] / 1000)  # ES ms -> epoch seconds
                data['aggs']['created_utc'] = response['data']['aggregations']['created_utc']['buckets']
            if 'domain' in response['data']['aggregations']:
                # Drop self-post pseudo-domains ("self.<subreddit>").
                newBuckets = []
                for bucket in response['data']['aggregations']['domain']['buckets']:
                    if 'self.' in bucket['key']:
                        continue
                    newBuckets.append(bucket)
                data['aggs']['domain'] = newBuckets
            if 'time_of_day' in response['data']['aggregations']:
                # Express per-hour activity as percentages and the deviation
                # of this result set from the background distribution.
                for bucket in response['data']['aggregations']['time_of_day']['buckets']:
                    bucket['bg_percentage'] = round(bucket['bg_count'] * 100 / response['data']['aggregations']['time_of_day']['bg_count'], 5)
                    bucket['doc_percentage'] = round(bucket['doc_count'] * 100 / response['data']['aggregations']['time_of_day']['doc_count'], 5)
                    bucket['deviation_percentage'] = round(bucket['doc_percentage'] - bucket['bg_percentage'], 4)
                    bucket['utc_hour'] = bucket['key']
                    bucket.pop('score', None)
                    bucket.pop('key', None)
                newlist = sorted(response['data']['aggregations']['time_of_day']['buckets'], key=lambda k: k['utc_hour'])
                data['aggs']['time_of_day'] = newlist
        end = time.time()
        data['data'] = results
        # Metadata is the normalized request parameters plus run statistics.
        # (The original first assigned response['metadata'] here and then
        # immediately overwrote it; the dead assignment is removed.)
        data['metadata'] = self.params
        data['metadata']['execution_time_milliseconds'] = round((end - self.start) * 1000, 2)
        data['metadata']['version'] = 'v3.0'
        data['metadata']['results_returned'] = len(response['data']['hits']['hits'])
        data['metadata']['timed_out'] = response['data']['timed_out']
        data['metadata']['total_results'] = response['data']['hits']['total']
        data['metadata']['shards'] = response['data']['_shards']
        resp.cache_control = ['public', 'max-age=2', 's-maxage=2']
        resp.body = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))

    def search(self, uri):
        """Build the ES query body from self.params and POST it to *uri*."""
        nested_dict = lambda: defaultdict(nested_dict)
        q = nested_dict()
        q['query']['bool']['filter'] = []
        q['query']['bool']['must_not'] = []
        self.params, q = Parameters.process(self.params, q)
        if 'q' in self.params and self.params['q'] is not None:
            sqs = nested_dict()
            sqs['simple_query_string']['query'] = self.params['q']
            sqs['simple_query_string']['default_operator'] = 'and'
            q['query']['bool']['filter'].append(sqs)
        # Field-scoped positive matches.
        for condition in ["title", "selftext"]:
            if condition in self.params and self.params[condition] is not None:
                sqs = nested_dict()
                sqs['simple_query_string']['query'] = self.params[condition]
                sqs['simple_query_string']['fields'] = [condition]
                sqs['simple_query_string']['default_operator'] = 'and'
                q['query']['bool']['filter'].append(sqs)
        # Negated matches go into must_not; "q:not" is unscoped.
        for condition in ["title:not", "q:not", "selftext:not"]:
            if condition in self.params and self.params[condition] is not None:
                sqs = nested_dict()
                sqs['simple_query_string']['query'] = self.params[condition]
                if condition != 'q:not':
                    sqs['simple_query_string']['fields'] = [condition.split(":")[0]]
                sqs['simple_query_string']['default_operator'] = 'and'
                q['query']['bool']['must_not'].append(sqs)
        min_doc_count = 0
        if 'min_doc_count' in self.params and self.params['min_doc_count'] is not None and LooksLikeInt(self.params['min_doc_count']):
            min_doc_count = self.params['min_doc_count']
        if 'aggs' in self.params:
            if isinstance(self.params['aggs'], str):
                self.params['aggs'] = [self.params['aggs']]
            for agg in list(self.params['aggs']):
                if agg.lower() == 'subreddit':
                    q['aggs']['subreddit']['significant_terms']['field'] = 'subreddit.keyword'
                    q['aggs']['subreddit']['significant_terms']['size'] = 1000
                    q['aggs']['subreddit']['significant_terms']['script_heuristic']['script']['lang'] = 'painless'
                    q['aggs']['subreddit']['significant_terms']['script_heuristic']['script']['inline'] = 'params._subset_freq'
                    q['aggs']['subreddit']['significant_terms']['min_doc_count'] = min_doc_count
                if agg.lower() == 'author':
                    q['aggs']['author']['terms']['field'] = 'author.keyword'
                    q['aggs']['author']['terms']['size'] = 1000
                    q['aggs']['author']['terms']['order']['_count'] = 'desc'
                if agg.lower() == 'created_utc':
                    q['aggs']['created_utc']['date_histogram']['field'] = 'created_utc'
                    if self.params['frequency'] is None:
                        self.params['frequency'] = "day"
                    q['aggs']['created_utc']['date_histogram']['interval'] = self.params['frequency']
                    q['aggs']['created_utc']['date_histogram']['order']['_key'] = 'asc'
                if agg.lower() == 'domain':
                    q['aggs']['domain']['terms']['field'] = 'domain.keyword'
                    q['aggs']['domain']['terms']['size'] = 1000
                    q['aggs']['domain']['terms']['order']['_count'] = 'desc'
                if agg.lower() == 'time_of_day':
                    q['aggs']['time_of_day']['significant_terms']['field'] = 'hour'
                    q['aggs']['time_of_day']['significant_terms']['size'] = 25
                    # Intentional bare access: touching the defaultdict key
                    # materializes an empty {} for the ES "percentage" heuristic.
                    q['aggs']['time_of_day']['significant_terms']['percentage']
        response = requests.get(uri, data=json.dumps(q))
        results = {}
        results['data'] = json.loads(response.text)
        results['metadata'] = {}
        results['metadata']['sort'] = self.params['sort']
        results['metadata']['sort_type'] = self.params['sort_type']
        return results

    def getIds(self, ids):
        """Fetch submissions by base-36 id straight from Elasticsearch.

        Accepts a single id or a list; a leading "t3_" prefix is stripped.
        """
        nested_dict = lambda: defaultdict(nested_dict)
        if not isinstance(ids, (list, tuple)):
            ids = [ids]
        ids_to_fetch = []
        for id in ids:
            id = id.lower()
            if id[:3] == "t3_":
                id = id[3:]
            ids_to_fetch.append(base36decode(id))
        q = nested_dict()
        q["query"]["terms"]["id"] = ids_to_fetch
        q["size"] = 500
        response = requests.get("http://mars:9200/rs/submissions/_search", data=json.dumps(q))
        s = json.loads(response.text)
        results = []
        for hit in s["hits"]["hits"]:
            source = hit["_source"]
            source["id"] = base36encode(int(hit["_id"]))
            if 'subreddit_id' in source:
                source['subreddit_id'] = "t5_" + base36encode(source['subreddit_id'])
            source["full_link"] = "https://www.reddit.com" + source["permalink"]
            if 'fields' in self.params:
                if isinstance(self.params['fields'], str):
                    self.params['fields'] = [self.params['fields']]
                self.params['fields'] = [x.lower() for x in self.params['fields']]
                for key in list(source):
                    if key.lower() not in self.params['fields']:
                        source.pop(key, None)
            results.append(source)
        data = {}
        data["data"] = results
        data["metadata"] = {}
        return data
class getCommentIDs:
    """Falcon resource listing every comment id belonging to a submission."""

    def on_get(self, req, resp, submission_id):
        """Emit the base-36 comment ids for *submission_id* as JSON."""
        link_id = submission_id.lower()
        if link_id.startswith('t3_'):
            link_id = link_id[3:]  # accept either bare or t3_-prefixed ids
        link_id = base36decode(link_id)
        rows = DBFunctions.pgdb.execute("SELECT (json->>'id')::bigint comment_id FROM comment WHERE (json->>'link_id')::int = %s ORDER BY comment_id ASC LIMIT 50000", link_id)
        comment_ids = []
        if rows:
            comment_ids = [base36encode(row[0]) for row in rows]
        payload = {'data': comment_ids}
        resp.cache_control = ["public", "max-age=5", "s-maxage=5"]
        resp.body = json.dumps(payload, sort_keys=True, indent=4, separators=(',', ': '))
| 12,362 | 48.25498 | 180 |
py
|
api
|
api-master/api/User.py
|
import time
import html
from collections import defaultdict
import Parameters
from Helpers import *
class Analyze:
    """Falcon resource aggregating a single author's comment activity."""

    def on_get(self, req, resp, author):
        """Return recent comments plus per-day, per-subreddit and per-link aggregations."""
        began = time.time()
        params = req.params
        searchURL = 'http://mars:9200/rc/comments/_search'
        nested = lambda: defaultdict(nested)
        query = nested()
        result_size = 2500
        query['query']['bool']['filter'] = []
        query['size'] = result_size
        if author is not None:
            author_filter = nested()
            author_filter['terms']['author'] = [author.lower()]
            query['query']['bool']['filter'].append(author_filter)
        # Newest comments first.
        query['sort']['created_utc'] = 'desc'
        # Daily activity histogram, oldest bucket first.
        query['aggs']['created_utc']['date_histogram']['field'] = 'created_utc'
        query['aggs']['created_utc']['date_histogram']['interval'] = "day"
        query['aggs']['created_utc']['date_histogram']['order']['_key'] = 'asc'
        # Most-active subreddits.
        query['aggs']['subreddit']['terms']['field'] = 'subreddit.keyword'
        query['aggs']['subreddit']['terms']['size'] = result_size
        query['aggs']['subreddit']['terms']['order']['_count'] = 'desc'
        # Most-commented submissions.
        query['aggs']['link_id']['terms']['field'] = 'link_id'
        query['aggs']['link_id']['terms']['size'] = 25
        query['aggs']['link_id']['terms']['order']['_count'] = 'desc'
        raw = requests.get(searchURL, data=json.dumps(query))
        response = json.loads(raw.text)
        link_buckets = response.get('aggregations', {}).get('link_id', {}).get('buckets', {})
        if link_buckets:
            # Convert raw base-10 link ids back to public t3_ thing ids.
            for row in response['aggregations']['link_id']['buckets']:
                row['key'] = 't3_' + base36encode(row['key'])
        finished = time.time()
        data = {'data': response, 'metadata': {}}
        data['metadata']['execution_time_milliseconds'] = round((finished - began) * 1000, 2)
        data['metadata']['version'] = 'v3.0'
        resp.cache_control = ['public', 'max-age=2', 's-maxage=2']
        resp.body = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
| 2,039 | 34.172414 | 87 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/train_AACVPMVSNet.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/3 0016 11:52
# @Author : Anzhu Yu
# @Site :
# @File : train_AACVPMVSNet.py
# @Software: PyCharm
# some packages used in this project
from argsParser import getArgsParser, checkArgs
import os
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from datasets import dtu_loader
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import time
from tensorboardX import SummaryWriter
from datasets import find_dataset_def
from models import *
from utils import *
import gc
import sys
import datetime
import torch.utils
import torch.utils.checkpoint
from torchscan import summary
# CUDA_LAUNCH_BLOCKING=1
parser = getArgsParser()
args = parser.parse_args()
assert args.mode == "train", 'HERE IS THE TRAINING MODE!'
checkArgs(args)
# Seed CPU and GPU RNGs for reproducibility; benchmark mode lets cuDNN pick
# the fastest convolution algorithms for fixed input shapes.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cudnn.benchmark = True
# logger: mirror records to both a timestamped file and stdout
logger = logging.getLogger()
logger.setLevel(logging.INFO)
curTime = time.strftime('%Y%m%d-%H%M', time.localtime(time.time()))
log_path = args.loggingdir + args.info.replace(" ", "_") + "/"
if not os.path.isdir(args.loggingdir):
    os.mkdir(args.loggingdir)
if not os.path.isdir(log_path):
    os.mkdir(log_path)
log_name = log_path + curTime + '.log'
logfile = log_name
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fileHandler = logging.FileHandler(logfile, mode='a')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.info("Logger initialized.")
logger.info("Writing logs to file:" + logfile)
# Dump every parsed argument so the run is self-describing in the log.
settings_str = "All settings:\n"
line_width = 20
for k, v in vars(args).items():
    settings_str += '{0}: {1}\n'.format(k, v)
logger.info(settings_str)
# Read the Data,
train_dataset = dtu_loader.MVSDataset(args, logger)
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=16, drop_last=True)
# Build the model
model = AACVPMVSNet(args, group=args.groups, num_heads=args.num_heads)
# Use the cuda_ids to determine the GPUs used for experiments.
# The sentinel id 666 selects CPU-only execution.
device_id_list = [int(idd) for idd in args.cuda_ids.split(',')]
if len(device_id_list) == 1 and (device_id_list[0] != 666):
    # NOTE(review): this branch handles a SINGLE GPU id, yet the message
    # announces multi-GPU mode — the print text looks inverted; confirm.
    print("Now multi-GPUs mode activated!")
    device_ids = [int(args.cuda_ids)]
elif (device_id_list[0] == 666):
    model = model.cpu()
else:
    device_ids = device_id_list
del device_id_list
# GPUs
# NOTE(review): in the CPU branch above `device_ids` is never bound, so this
# would raise NameError if CUDA were nevertheless available — verify intent.
if args.mode == "train" and torch.cuda.is_available():
    model = nn.DataParallel(model, device_ids=device_ids, output_device=device_ids[0])
if torch.cuda.is_available():
    model.cuda()
model.train()
# Select the loss used by train_sample below.
if args.loss_function == "sl1":
    logger.info("Using smoothed L1 loss")
    model_loss = sL1_loss
else:  # MSE
    logger.info("Using MSE loss")
    model_loss = MSE_loss
logger.info(">>> total params: {:.2f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
# model_loss = mvsnet_loss
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=args.wd)
# Start at a given checkpoint
sw_path = args.logckptdir + args.info + "/"
start_epoch = 0
if (args.mode == "train" and args.resume) or (args.mode == "test" and not args.loadckpt):
    logger.info("Resuming or testing...")
    saved_models = [fn for fn in os.listdir(sw_path) if fn.endswith(".ckpt")]
    # Checkpoints are named model_<epoch>.ckpt; sort numerically by epoch.
    saved_models = sorted(saved_models, key=lambda x: int(x.split('_')[-1].split('.')[0]))
    # use the latest checkpoint file
    loadckpt = os.path.join(sw_path, saved_models[-1])
    logger.info("Resuming " + loadckpt)
    state_dict = torch.load(loadckpt)
    model.load_state_dict(state_dict['model'])
    optimizer.load_state_dict(state_dict['optimizer'])
    start_epoch = state_dict['epoch'] + 1
elif args.loadckpt:
    # load checkpoint file specified by args.loadckpt
    logger.info("loading model {}".format(args.loadckpt))
    state_dict = torch.load(args.loadckpt)
    model.load_state_dict(state_dict['model'])
print("start at epoch {}".format(start_epoch))
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
# Training at each epoch
def train():
milestones = [int(epoch_idx) for epoch_idx in args.lrepochs.split(':')[0].split(',')]
lr_gamma = 1 / float(args.lrepochs.split(':')[1])
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=lr_gamma,
last_epoch=start_epoch - 1)
# epoch stat
last_loss = None
this_loss = None
for epoch_idx in range(start_epoch, args.epochs):
logger.info('Epoch {}:'.format(epoch_idx))
global_step = len(train_loader) * epoch_idx
if last_loss is None:
last_loss = 999999
else:
last_loss = this_loss
this_loss = []
for batch_idx, sample in enumerate(train_loader):
start_time = time.time()
global_step = len(train_loader) * epoch_idx + batch_idx
do_summary = global_step % args.summary_freq == 0
loss = train_sample(sample, detailed_summary=do_summary)
this_loss.append(loss)
logger.info(
'Epoch {}/{}, Iter {}/{}, train loss = {:.3f}, time = {:.3f}'.format(epoch_idx, args.epochs, batch_idx,
len(train_loader), loss,
time.time() - start_time))
# checkpoint
if (epoch_idx + 1) % args.save_freq == 0:
torch.save({
'epoch': epoch_idx,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()},
"{}/model_{:0>6}.ckpt".format(args.logckptdir + args.info.replace(" ", "_"), epoch_idx))
logger.info("model_{:0>6}.ckpt saved".format(epoch_idx))
this_loss = np.mean(this_loss)
logger.info("Epoch loss: {:.5f} --> {:.5f}".format(last_loss, this_loss))
lr_scheduler.step()
# Training for each batch
def train_sample(sample, detailed_summary=False):
"""
:param sample: each batch
:param detailed_summary: whether the detailed logs are needed.
:return: the loss
"""
# model.train() is not needed here, however it is often used to state this script is not for evaluation.
model.train()
optimizer.zero_grad()
sample_cuda = tocuda(sample)
ref_depths = sample_cuda["ref_depths"]
# forward
outputs = model(sample_cuda["ref_img"].float(), sample_cuda["src_imgs"].float(), sample_cuda["ref_intrinsics"],
sample_cuda["src_intrinsics"], sample_cuda["ref_extrinsics"], sample_cuda["src_extrinsics"],
sample_cuda["depth_min"], sample_cuda["depth_max"])
depth_est_list = outputs["depth_est_list"]
dHeight = ref_depths.shape[2]
dWidth = ref_depths.shape[3]
loss = []
for i in range(0, args.nscale):
# generate the masks.
depth_gt = ref_depths[:, i, :int(dHeight / 2 ** i), :int(dWidth / 2 ** i)]
mask = depth_gt > 425
loss.append(model_loss(depth_est_list[i], depth_gt.float(), mask))
loss = sum(loss)
loss.backward()
optimizer.step()
return loss.data.cpu().item()
# main function, the start of this program
# (args.mode == "train" is already asserted above, so train() always runs)
if __name__ == '__main__':
    if args.mode == "train":
        train()
| 7,685 | 33.466368 | 119 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/eval_AACVPMVSNet.py
|
# Evaluate AACVP-MVSNet
# Modified by: Bing Liu
import os, sys, time, logging, argparse, datetime, re
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import dtu_loader
from models import *
# from models.modules import *
from utils import *
from PIL import Image
from argsParser import getArgsParser
from plyfile import PlyData, PlyElement
# Debug import
import pdb
import matplotlib.pyplot as plt
# Let cuDNN pick the fastest conv algorithms for the fixed input sizes used here.
cudnn.benchmark = True
# Arg parser
parser = getArgsParser()
args = parser.parse_args()
# this script is evaluation-only
assert args.mode == "test"
# logger: write to both a timestamped file under loggingdir/<info>/ and stdout
logger = logging.getLogger()
logger.setLevel(logging.INFO)
curTime = time.strftime('%Y%m%d-%H%M', time.localtime(time.time()))
log_path = args.loggingdir + args.info.replace(" ", "_") + "/"
if not os.path.isdir(args.loggingdir):
    os.mkdir(args.loggingdir)
if not os.path.isdir(log_path):
    os.mkdir(log_path)
log_name = log_path + curTime + '.log'
logfile = log_name
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fileHandler = logging.FileHandler(logfile, mode='a')
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
logger.info("Logger initialized.")
logger.info("Writing logs to file:" + logfile)
# dump every parsed argument into the log for reproducibility
settings_str = "All settings:\n"
line_width = 30  # NOTE(review): unused — kept for byte-compatibility
for k, v in vars(args).items():
    settings_str += '{0}: {1}\n'.format(k, v)
logger.info(settings_str)
# Run AACVP-MVSNet to save depth maps and confidence maps
def save_depth():
    """Run AACVP-MVSNet on the test set and write per-view depth and
    confidence maps (.pfm plus a .png preview) under ``args.outdir``.

    Uses the module-level ``args`` and ``logger``; loads the checkpoint
    from ``args.loadckpt``.
    """
    # dataset, dataloader
    test_dataset = dtu_loader.MVSDataset(args, logger)
    test_loader = DataLoader(test_dataset, args.batch_size, shuffle=args.eval_shuffle, num_workers=16, drop_last=True)
    model = AACVPMVSNet(args, group = args.groups, num_heads = args.num_heads)
    device_ids = [0]
    model = nn.DataParallel(model, device_ids=device_ids, output_device=device_ids[0])
    model.cuda()
    logger.info("loading model {}".format(args.loadckpt))
    state_dict = torch.load(args.loadckpt)
    # strict=False tolerates missing/extra keys between checkpoint and model
    model.load_state_dict(state_dict['model'], strict=False)
    # NOTE(review): model.eval() is not called; harmless only if the network
    # has no dropout/batch-norm layers — confirm against the model definition.
    with torch.no_grad():
        for batch_idx, sample in enumerate(test_loader):
            start_time = time.time()
            sample_cuda = tocuda(sample)
            torch.cuda.empty_cache()
            outputs = model( \
                sample_cuda["ref_img"].float(), \
                sample_cuda["src_imgs"].float(), \
                sample_cuda["ref_intrinsics"], \
                sample_cuda["src_intrinsics"], \
                sample_cuda["ref_extrinsics"], \
                sample_cuda["src_extrinsics"], \
                sample_cuda["depth_min"], \
                sample_cuda["depth_max"])
            # depth_est_list[0] is the finest-scale prediction
            depth_est_list = outputs["depth_est_list"]
            depth_est = depth_est_list[0].data.cpu().numpy()
            prob_confidence = outputs["prob_confidence"].data.cpu().numpy()
            del sample_cuda
            filenames = sample["filename"]
            logger.info('Iter {}/{}, time = {:.3f}'.format(batch_idx, len(test_loader), time.time() - start_time))
            # save depth maps and confidence maps
            for filename, est_depth, photometric_confidence in zip(filenames, depth_est, prob_confidence):
                # print(depth_est.shape, prob_confidence.shape)
                # filename is a template with one '{}' slot for the subfolder
                depth_filename = os.path.join(args.outdir, filename.format('depth_est', '.pfm'))
                confidence_filename = os.path.join(args.outdir, filename.format('confidence', '.pfm'))
                os.makedirs(depth_filename.rsplit('/', 1)[0], exist_ok=True)
                os.makedirs(confidence_filename.rsplit('/', 1)[0], exist_ok=True)
                # save depth maps
                save_pfm(depth_filename, est_depth)
                write_depth_img(depth_filename + ".png", est_depth)
                # Save prob maps
                save_pfm(confidence_filename, photometric_confidence)
def read_pfm(filename):
    """Read a PFM image file.

    :param filename: path to a 'PF' (color) or 'Pf' (greyscale) PFM file
    :return: ``(data, scale)`` — *data* is a numpy array of shape (H, W)
        or (H, W, 3), flipped so row 0 is the top of the image; *scale*
        is the positive scale factor from the header.
    :raises Exception: on a malformed header.
    """
    # 'with' guarantees the descriptor is closed even on the raise paths
    # below (the original leaked the handle whenever the header was bad).
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # negative scale marks little-endian payload
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian payload
        data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # PFM stores scanlines bottom-to-top; flip so row 0 is the image top.
    data = np.flipud(data)
    return data, scale
def read_camera_parameters(filename):
    """Read a DTU-style camera file and return ``(intrinsics, extrinsics)``.

    Layout: lines [1, 5) hold the flattened 4x4 extrinsic matrix and
    lines [7, 10) the flattened 3x3 intrinsic matrix.

    :return: (3x3 float32 intrinsics, 4x4 float32 extrinsics)
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    # np.fromstring is deprecated since NumPy 1.14 and warns on every call;
    # tokenising ourselves is the supported, behaviour-identical replacement.
    # extrinsics: line [1,5), 4x4 matrix
    extrinsics = np.array(' '.join(lines[1:5]).split(), dtype=np.float32).reshape((4, 4))
    # intrinsics: line [7-10), 3x3 matrix
    intrinsics = np.array(' '.join(lines[7:10]).split(), dtype=np.float32).reshape((3, 3))
    return intrinsics, extrinsics
def read_pair_file(filename):
    """Parse a pair.txt file into a list of ``(ref_view, [src_views])`` tuples."""
    pairs = []
    with open(filename) as handle:
        total = int(handle.readline())
        for _ in range(total):
            ref = int(handle.readline().rstrip())
            # source line format: "<count> <id> <score> <id> <score> ..." —
            # keep the ids (every other token after the count), drop the scores
            tokens = handle.readline().rstrip().split()
            pairs.append((ref, [int(tok) for tok in tokens[1::2]]))
    return pairs
# read an image
def read_img(filename):
    """Load an image, crop it to 1600x1184 (DTU-specific), return a uint8 array."""
    img = Image.open(filename)
    # Fixed DTU crop window (left, top, right, bottom): keep the top-left
    # 1600x1184 region so the size divides evenly through the network.
    crop_box = (0, 0, 1600, 1184)
    cropped = img.crop(crop_box)
    return np.array(cropped, dtype=np.uint8)
# read a binary mask
def read_mask(filename):
    """Load an image and binarize it: True wherever the raw uint8 value is > 0.5."""
    pixels = read_img(filename)
    return pixels > 0.5
# save a binary mask
def save_mask(filename, mask):
    """Save a boolean mask as an 8-bit black/white image.

    :param filename: output image path
    :param mask: boolean numpy array; True pixels are written as 255
    """
    # np.bool (an alias of the builtin) was removed in NumPy 1.24, which made
    # the original assert raise AttributeError; builtin bool is the canonical
    # dtype object for boolean arrays.
    assert mask.dtype == bool
    mask = mask.astype(np.uint8) * 255
    Image.fromarray(mask).save(filename)
def save_pfm(filename, image, scale=1):
    """Write a float32 numpy image to *filename* in PFM format.

    :param filename: output path; parent directories are created on demand
    :param image: float32 array, (H, W) / (H, W, 1) greyscale or (H, W, 3) color
    :param scale: positive scale factor; written negative for little-endian data
    :raises Exception: if the dtype is not float32 or the shape is unsupported
    """
    out_dir = os.path.dirname(filename)
    # exist_ok=True is race-free; the original caught OSError and compared
    # errno.EEXIST but never imported errno, so that path raised NameError.
    # The `if out_dir` guard also fixes a crash on bare filenames ('' dirname).
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    # validate before touching the filesystem so no empty file is left behind
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    # PFM stores scanlines bottom-to-top, so flip before writing.
    image = np.flipud(image)
    with open(filename, "wb") as file:
        file.write(('PF\n' if color else 'Pf\n').encode('utf-8'))
        file.write('{} {}\n'.format(image.shape[1], image.shape[0]).encode('utf-8'))
        endian = image.dtype.byteorder
        # a negative scale in the header marks the payload as little-endian
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale
        file.write(('%f\n' % scale).encode('utf-8'))
        image.tofile(file)
def write_depth_img(filename, depth):
    """Save a depth map as an 8-bit greyscale preview image.

    Values are mapped as ``(depth - 500) / 2`` before the 8-bit conversion,
    so the visible range is roughly [500, 1010].

    :return: 1 (kept for compatibility with existing callers)
    """
    out_dir = os.path.dirname(filename)
    # exist_ok=True replaces the original race-condition handler, which
    # referenced errno without importing it (NameError when it fired).
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    image = Image.fromarray((depth - 500) / 2).convert("L")
    image.save(filename)
    return 1
# project the reference point cloud into the source view, then project back
def reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Project the reference depth map into the source view and back.

    :param depth_ref: (H, W) reference-view depth map
    :param depth_src: source-view depth map, sampled via cv2.remap below
    :return: ``(depth_reprojected, x_reprojected, y_reprojected, x_src, y_src)``
        — all (H, W) float32 arrays: the round-tripped depth, its pixel
        coordinates in the reference view, and the forward-projected pixel
        coordinates in the source view.
    """
    width, height = depth_ref.shape[1], depth_ref.shape[0]
    ## step1. project reference pixels to the source view
    # reference view x, y
    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
    x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1])
    # reference 3D space: back-project pixels with the reference depth
    xyz_ref = np.matmul(np.linalg.inv(intrinsics_ref),
                        np.vstack((x_ref, y_ref, np.ones_like(x_ref))) * depth_ref.reshape([-1]))
    # source 3D space: relative transform ref->src applied in homogeneous coords
    xyz_src = np.matmul(np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref)),
                        np.vstack((xyz_ref, np.ones_like(x_ref))))[:3]
    # source view x, y (perspective divide by the third row)
    K_xyz_src = np.matmul(intrinsics_src, xyz_src)
    xy_src = K_xyz_src[:2] / K_xyz_src[2:3]
    ## step2. reproject the source view points with source view depth estimation
    # find the depth estimation of the source view
    x_src = xy_src[0].reshape([height, width]).astype(np.float32)
    y_src = xy_src[1].reshape([height, width]).astype(np.float32)
    # bilinearly sample the source depth map at the projected coordinates
    sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR)
    # mask = sampled_depth_src > 0
    # source 3D space
    # NOTE that we should use sampled source-view depth_here to project back
    xyz_src = np.matmul(np.linalg.inv(intrinsics_src),
                        np.vstack((xy_src, np.ones_like(x_ref))) * sampled_depth_src.reshape([-1]))
    # reference 3D space: inverse transform src->ref
    xyz_reprojected = np.matmul(np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src)),
                                np.vstack((xyz_src, np.ones_like(x_ref))))[:3]
    # source view x, y, depth
    depth_reprojected = xyz_reprojected[2].reshape([height, width]).astype(np.float32)
    K_xyz_reprojected = np.matmul(intrinsics_ref, xyz_reprojected)
    xy_reprojected = K_xyz_reprojected[:2] / K_xyz_reprojected[2:3]
    x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32)
    y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32)
    return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src
def check_geometric_consistency(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):
    """Mark reference pixels whose depth is geometrically consistent with a
    source view: reprojection error < 0.5 px AND relative depth error < 1%.

    :return: ``(mask, depth_reprojected, x2d_src, y2d_src)`` — the boolean
        consistency mask, the round-tripped depth (zeroed where inconsistent),
        and the forward-projected source-view pixel coordinates.
    """
    width, height = depth_ref.shape[1], depth_ref.shape[0]
    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))
    depth_reprojected, x2d_reprojected, y2d_reprojected, x2d_src, y2d_src = reproject_with_depth(depth_ref,
                                                                                                intrinsics_ref,
                                                                                                extrinsics_ref,
                                                                                                depth_src,
                                                                                                intrinsics_src,
                                                                                                extrinsics_src)
    # check |p_reproj-p_1| < 1
    dist = np.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2)
    # check |d_reproj-d_1| / d_1 < 0.01
    depth_diff = np.abs(depth_reprojected - depth_ref)
    relative_depth_diff = depth_diff / depth_ref
    mask = np.logical_and(dist < 0.5, relative_depth_diff < 0.01)
    # zero out depths that failed the consistency test
    depth_reprojected[~mask] = 0
    return mask, depth_reprojected, x2d_src, y2d_src
def filter_depth(dataset_root, scan, out_folder, plyfilename):
    """Fuse the per-view depth maps of one scan into a single point cloud.

    Pixels are kept only when photometrically confident (confidence > 0.9)
    and geometrically consistent with at least 3 source views; the surviving
    points are lifted to world coordinates and written as a PLY file.

    :param dataset_root: DTU root containing Cameras/ and Rectified/
    :param scan: scan folder name (e.g. "scan9")
    :param out_folder: folder with this scan's depth_est/ and confidence/ maps
    :param plyfilename: output .ply path
    """
    print("Starting fusion for:" + out_folder)
    # the pair file
    pair_file = os.path.join(dataset_root, 'Cameras/pair.txt')
    # for the final point cloud
    vertexs = []
    vertex_colors = []
    pair_data = read_pair_file(pair_file)
    nviews = len(pair_data)
    # for each reference view and the corresponding source views
    for ref_view, src_views in pair_data:
        # load the camera parameters
        ref_intrinsics, ref_extrinsics = read_camera_parameters(
            os.path.join(dataset_root, 'Cameras/{:0>8}_cam.txt'.format(ref_view)))
        # load the reference image
        ref_img = read_img(os.path.join(dataset_root, "Rectified", scan,
                                        'rect_{:03d}_3_r5000.png'.format(ref_view + 1))) # Image start from 1.
        # load the estimated depth of the reference view
        ref_depth_est, scale = read_pfm(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(ref_view)))
        # load the photometric mask of the reference view
        confidence, scale = read_pfm(os.path.join(out_folder, 'confidence/{:0>8}.pfm'.format(ref_view)))
        photo_mask = confidence > 0.9
        all_srcview_depth_ests = []
        all_srcview_x = []
        all_srcview_y = []
        all_srcview_geomask = []
        # compute the geometric mask: count, per pixel, how many source views agree
        geo_mask_sum = 0
        for src_view in src_views:
            # camera parameters of the source view
            src_intrinsics, src_extrinsics = read_camera_parameters(
                os.path.join(dataset_root, 'Cameras/{:0>8}_cam.txt'.format(src_view)))
            # the estimated depth of the source view
            src_depth_est, scale = read_pfm(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(src_view)))
            geo_mask, depth_reprojected, x2d_src, y2d_src = check_geometric_consistency(ref_depth_est, ref_intrinsics,
                                                                                       ref_extrinsics,
                                                                                       src_depth_est,
                                                                                       src_intrinsics, src_extrinsics)
            geo_mask_sum += geo_mask.astype(np.int32)
            all_srcview_depth_ests.append(depth_reprojected)
            all_srcview_x.append(x2d_src)
            all_srcview_y.append(y2d_src)
            all_srcview_geomask.append(geo_mask)
        # average the reference depth with every consistent reprojected depth
        # (inconsistent reprojections are 0, so they add nothing to the sum)
        depth_est_averaged = (sum(all_srcview_depth_ests) + ref_depth_est) / (geo_mask_sum + 1)
        # at least 3 source views matched
        geo_mask = geo_mask_sum >= 3
        final_mask = np.logical_and(photo_mask, geo_mask)
        os.makedirs(os.path.join(out_folder, "mask"), exist_ok=True)
        save_mask(os.path.join(out_folder, "mask/{:0>8}_photo.png".format(ref_view)), photo_mask)
        save_mask(os.path.join(out_folder, "mask/{:0>8}_geo.png".format(ref_view)), geo_mask)
        save_mask(os.path.join(out_folder, "mask/{:0>8}_final.png".format(ref_view)), final_mask)
        print("processing {}, ref-view{:0>2}, photo/geo/final-mask:{}/{}/{}".format(scan, ref_view,
                                                                                   photo_mask.mean(),
                                                                                   geo_mask.mean(), final_mask.mean()))
        height, width = depth_est_averaged.shape[:2]
        x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))
        # valid_points = np.logical_and(final_mask, ~used_mask[ref_view])
        valid_points = final_mask
        print("valid_points", valid_points.mean())
        x, y, depth = x[valid_points], y[valid_points], depth_est_averaged[valid_points]
        ref_img = np.array(ref_img)
        color = ref_img[valid_points]
        # back-project the surviving pixels into reference-camera space ...
        xyz_ref = np.matmul(np.linalg.inv(ref_intrinsics),
                            np.vstack((x, y, np.ones_like(x))) * depth)
        # ... then into world space via the inverse extrinsics
        xyz_world = np.matmul(np.linalg.inv(ref_extrinsics),
                              np.vstack((xyz_ref, np.ones_like(x))))[:3]
        vertexs.append(xyz_world.transpose((1, 0)))
        vertex_colors.append((color).astype(np.uint8))
    vertexs = np.concatenate(vertexs, axis=0)
    vertex_colors = np.concatenate(vertex_colors, axis=0)
    # pack positions and colors into a single structured array for plyfile
    vertexs = np.array([tuple(v) for v in vertexs], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    vertex_colors = np.array([tuple(v) for v in vertex_colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)
    for prop in vertexs.dtype.names:
        vertex_all[prop] = vertexs[prop]
    for prop in vertex_colors.dtype.names:
        vertex_all[prop] = vertex_colors[prop]
    el = PlyElement.describe(vertex_all, 'vertex')
    print("Saving the final model to", plyfilename)
    PlyData([el], comments=['Model created by AACVP-MVSNet.']).write(plyfilename)
    print("Model saved.")
if __name__ == '__main__':
    # Step 1: run the network and write per-view depth/confidence maps.
    save_depth()
    # Next: using the fusibile toolbox for depth map fusion and 3D reconstruction
| 16,769 | 38.833729 | 120 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/6/28 0028 11:55
# @Author : Anzhu Yu
# @Site :
# @File : utils.py
# @Software: PyCharm
import numpy as np
import torchvision.utils as vutils
import torch
import torch.nn.functional as F
# print arguments
def print_args(args):
    """Pretty-print every parsed argument with its value and type."""
    print("################################ args ################################")
    for name, value in vars(args).items():
        print("{0: <10}\t{1: <30}\t{2: <20}".format(name, str(value), str(type(value))))
    print("########################################################################")
# torch.no_grad wrapper for functions
def make_nograd_func(func):
    """Return *func* wrapped so it always executes under torch.no_grad()."""
    def wrapper(*f_args, **f_kwargs):
        with torch.no_grad():
            return func(*f_args, **f_kwargs)
    return wrapper
# convert a function into recursive style to handle nested dict/list/tuple variables
def make_recursive_func(func):
    """Lift *func* so it maps over arbitrarily nested lists/tuples/dicts."""
    def wrapper(vars):
        if isinstance(vars, dict):
            return {key: wrapper(value) for key, value in vars.items()}
        if isinstance(vars, (list, tuple)):
            mapped = [wrapper(item) for item in vars]
            return mapped if isinstance(vars, list) else tuple(mapped)
        return func(vars)
    return wrapper
@make_recursive_func
def tensor2float(vars):
    """Convert each leaf of a nested structure (scalar tensor or float) to float."""
    if isinstance(vars, torch.Tensor):
        return vars.data.item()
    if isinstance(vars, float):
        return vars
    raise NotImplementedError("invalid input type {} for tensor2float".format(type(vars)))
@make_recursive_func
def tensor2numpy(vars):
    """Convert each leaf of a nested structure (tensor or ndarray) to a numpy array."""
    if isinstance(vars, torch.Tensor):
        return vars.detach().cpu().numpy().copy()
    if isinstance(vars, np.ndarray):
        return vars
    raise NotImplementedError("invalid input type {} for tensor2numpy".format(type(vars)))
@make_recursive_func
def tocuda(vars):
    """Move each tensor leaf of a nested structure onto the GPU.

    Strings pass through unchanged (e.g. filenames in a dataloader batch).
    """
    if isinstance(vars, torch.Tensor):
        return vars.cuda()
    elif isinstance(vars, str):
        return vars
    else:
        # the original message said "tensor2numpy" (copy-paste error),
        # which made failures here point at the wrong function
        raise NotImplementedError("invalid input type {} for tocuda".format(type(vars)))
def save_scalars(logger, mode, scalar_dict, global_step):
    """Log every scalar in *scalar_dict* under the tag prefix *mode*.

    :param logger: a SummaryWriter-like object exposing ``add_scalar``
    :param mode: tag prefix, e.g. "train" or "test"
    :param scalar_dict: possibly nested dict of tensors/floats; list/tuple
        values are logged with an index suffix
    :param global_step: x-axis value for the scalar series
    """
    scalar_dict = tensor2float(scalar_dict)
    for key, value in scalar_dict.items():
        if not isinstance(value, (list, tuple)):
            name = '{}/{}'.format(mode, key)
            logger.add_scalar(name, value, global_step)
        else:
            # one curve per list element, tagged mode/key_idx
            for idx in range(len(value)):
                name = '{}/{}_{}'.format(mode, key, idx)
                logger.add_scalar(name, value[idx], global_step)
def save_images(logger, mode, images_dict, global_step):
    """Log every image in *images_dict* under the tag prefix *mode*.

    :param logger: a SummaryWriter-like object exposing ``add_image``
    :param images_dict: dict of tensors/arrays with shape (B, H, W) or
        (B, C, H, W); list/tuple values are logged with an index suffix
    :param global_step: x-axis value for the image series
    """
    images_dict = tensor2numpy(images_dict)
    def preprocess(name, img):
        if not (len(img.shape) == 3 or len(img.shape) == 4):
            raise NotImplementedError("invalid img shape {}:{} in save_images".format(name, img.shape))
        # promote (B, H, W) to (B, 1, H, W) so make_grid gets a channel axis
        if len(img.shape) == 3:
            img = img[:, np.newaxis, :, :]
        # only the first sample of the batch is visualized
        img = torch.from_numpy(img[:1])
        return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True)
    for key, value in images_dict.items():
        if not isinstance(value, (list, tuple)):
            name = '{}/{}'.format(mode, key)
            logger.add_image(name, preprocess(name, value), global_step)
        else:
            for idx in range(len(value)):
                name = '{}/{}_{}'.format(mode, key, idx)
                logger.add_image(name, preprocess(name, value[idx]), global_step)
class DictAverageMeter(object):
    """Accumulate per-key float sums over updates and report running means."""

    def __init__(self):
        self.data = {}   # key -> running sum of observed values
        self.count = 0   # number of update() calls

    def update(self, new_input):
        """Fold one observation dict in; every value must be a float."""
        self.count += 1
        first_update = len(self.data) == 0
        for key, value in new_input.items():
            if not isinstance(value, float):
                raise NotImplementedError("invalid data {}: {}".format(key, type(value)))
            if first_update:
                self.data[key] = value
            else:
                # later updates must use the same key set as the first one
                self.data[key] += value

    def mean(self):
        """Return {key: accumulated_sum / number_of_updates}."""
        return {k: v / self.count for k, v in self.data.items()}
# a wrapper to compute metrics for each image individually
def compute_metrics_for_each_image(metric_func):
    """Wrap a per-image metric so it averages over the batch dimension."""
    def wrapper(depth_est, depth_gt, mask, *args):
        per_image = [
            metric_func(depth_est[idx], depth_gt[idx], mask[idx], *args)
            for idx in range(depth_gt.shape[0])
        ]
        return torch.stack(per_image).mean()
    return wrapper
@make_nograd_func
@compute_metrics_for_each_image
def Thres_metrics(depth_est, depth_gt, mask, thres):
    """Fraction of valid (masked) pixels whose absolute depth error exceeds *thres*."""
    assert isinstance(thres, (int, float))
    est_valid, gt_valid = depth_est[mask], depth_gt[mask]
    exceeded = torch.abs(est_valid - gt_valid) > thres
    return torch.mean(exceeded.float())
# NOTE: please do not use this to build up training loss
@make_nograd_func
@compute_metrics_for_each_image
def AbsDepthError_metrics(depth_est, depth_gt, mask):
    """Mean absolute depth error over the valid (masked) pixels."""
    diff = depth_est[mask] - depth_gt[mask]
    return diff.abs().mean()
| 5,284 | 31.623457 | 103 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/argsParser.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/6/29 0028 11:52
# @Author : Anzhu Yu (Modified). The original
# @Site :
# @File : train.py
# @Software: PyCharm
import argparse
def getArgsParser():
    """Build and return the argparse parser shared by train/eval scripts.

    Groups: general settings, training settings, checkpoint settings,
    evaluation settings, and AACVP-specific parameters (GPUs, GWC groups,
    attention heads). Call ``parse_args()`` on the result.
    """
    parser = argparse.ArgumentParser(description='AACVP-MVSNet. Thanks J.Yang and X.Guo for sharing such good projects as references.')
    # The 'general settings' and 'training settings' are the same with those of J.Yang for fair comparson,
    # while the 'epoch' parameter is set to 40 instead of 28.
    # General settings
    parser.add_argument('--info', default='None', help='Info about current run')
    parser.add_argument('--mode', default='train', help='train or test ro validation', choices=['train', 'test', 'val'])
    parser.add_argument('--dataset_root', default='./datasets/dataset/dtu-train-128/',help='path to dataset root')
    parser.add_argument('--imgsize', type=int, default=128, choices=[128,1200], help='height of input image')
    parser.add_argument('--nsrc', type=int, default=3, help='number of src views to use')
    parser.add_argument('--nscale', type=int, default=7, help='number of scales to use')
    # Training settings
    parser.add_argument('--epochs', type=int, default=40, help='number of epochs to train')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    parser.add_argument('--lrepochs', type=str, default="10,12,14,20:2", help='epoch ids to downscale lr and the downscale rate')
    parser.add_argument('--wd', type=float, default=0.0, help='weight decay')
    parser.add_argument('--batch_size', type=int, default=1, help='train batch size')
    parser.add_argument('--summary_freq', type=int, default=1, help='print and summary frequency')
    parser.add_argument('--save_freq', type=int, default=1, help='save checkpoint frequency')
    parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed')
    parser.add_argument('--loss_function', default='sl1', help='which loss function to use', choices=['sl1','mse'])
    # Checkpoint settings (--resume and --loadckpt are mutually exclusive; see checkArgs)
    parser.add_argument('--loadckpt', type=str, default='', help='load a specific checkpoint')
    parser.add_argument('--logckptdir', default='./checkpoints/', help='the directory to save checkpoints/logs')
    parser.add_argument('--loggingdir', default='./logs/', help='the directory to save logging outputs')
    parser.add_argument('--resume', type=int, default=0, help='continue to train the model')
    # Evaluation settings
    parser.add_argument('--outdir', default='./outputs/debug/', help='the directory to save depth outputs')
    parser.add_argument('--eval_visualizeDepth', type=int, default=1)
    parser.add_argument('--eval_prob_filtering', type=int, default=0)
    parser.add_argument('--eval_prob_threshold', type=float, default=0.99)
    parser.add_argument('--eval_shuffle', type=int, default=0)
    # Here is new parameters
    # Since 4 2080ti(s) are used for training in our paper, this parameter should be modified according to the equipments used.
    parser.add_argument('--cuda_ids', default='0,1,2,3', help="GPUs used in train or test,'0,1' for example."
                                                             "If only cpu is used, use '666' instead")
    parser.add_argument('--groups', type=int, default=4,help='Groups used for GWC.')
    parser.add_argument('--num_heads',type=int, default=1, help='Heads for Self-Attention layer. Single head is set as default.')
    return parser
def checkArgs(args):
    """Validate parsed arguments.

    :raises AssertionError: if the mode is unknown, or if --resume and
        --loadckpt are supplied together (they are mutually exclusive).
    """
    # Check if the settings is valid
    assert args.mode in ["train", "val", "test"]
    if args.resume:
        # resuming picks up the latest checkpoint automatically, so an
        # explicit checkpoint path must not also be given
        assert len(args.loadckpt) == 0
    if args.loadckpt:
        # the original used `args.resume is 0` — identity comparison with an
        # int literal, which relies on CPython small-int caching and emits a
        # SyntaxWarning on Python 3.8+; value equality is the correct test
        assert args.resume == 0
| 3,731 | 56.415385 | 135 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/fusion/fusibile_to_dtu_eval.py
|
# Convert output of fusibile to DTU evaluation format.
# Author: Jiayu Yang
# Modified by B. Liu
import os
from os import listdir
# Copy the latest fused point cloud of each test scan into the layout
# expected by the official DTU evaluation code (AACVPMVSnetNNN_l3.ply).
fusibile_out_folder="../outputs/fusibile_fused/"
dtu_eval_folder="../outputs/"
if not os.path.isdir(dtu_eval_folder):
    os.mkdir(dtu_eval_folder)
# Read test list
testlist = "./scan_list_test.txt"
with open(testlist) as f:
    scans = f.readlines()
scans = [line.rstrip() for line in scans]
for scan in scans:
    # Move ply to dtu eval folder and rename
    scan_folder = os.path.join(fusibile_out_folder,scan)
    # fusibile writes one consistencyCheck-<timestamp> folder per run;
    # lexicographic sort + [-1] picks the most recent one
    consis_folders = [f for f in listdir(scan_folder) if f.startswith('consistencyCheck-')]
    consis_folders.sort()
    consis_folder = consis_folders[-1]
    source_ply = os.path.join(fusibile_out_folder,scan,consis_folder,'final3d_model.ply')
    # scan names look like "scanNN"; strip the "scan" prefix for the index
    scan_idx = int(scan[4:])
    target_ply = os.path.join(dtu_eval_folder,'AACVPMVSnet{:03d}_l3.ply'.format(scan_idx))
    # NOTE(review): unquoted shell command — breaks if any path contains
    # spaces; shutil.copy would be safer here
    cmd = 'cp '+source_ply+' '+target_ply
    print(cmd)
    os.system(cmd)
| 998 | 28.382353 | 91 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/fusion/depthfusion.py
|
# Modified version of the original depthfusion.py from Yao Yao.
# Convert our output to Gipuma format for post-processing.
# By: Jiayu Yang
# Date: 2019-11-05
from __future__ import print_function
import argparse
import os
import time
import glob
import random
import math
import re
import sys
import shutil
from struct import *
import cv2
import numpy as np
import pylab as plt
from preprocess import *
import pdb
def read_gipuma_dmb(path):
    '''read Gipuma .dmb format image'''
    with open(path, "rb") as fid:
        # header: four little-endian int32s (type, height, width, channels)
        _image_type, height, width, channel = unpack('<4i', fid.read(16))
        raw = np.fromfile(fid, np.float32)
    # payload is stored column-major as (width, height, channel);
    # transpose back to the conventional (height, width, channel)
    raw = raw.reshape((width, height, channel), order="F")
    return np.transpose(raw, (1, 0, 2)).squeeze()
def write_gipuma_dmb(path, image):
    '''write Gipuma .dmb format image'''
    shape = np.shape(image)
    height, width = shape[0], shape[1]
    channels = shape[2] if len(shape) == 3 else 1
    if len(shape) == 3:
        # store channel-first so the flat payload matches Gipuma's layout
        image = np.transpose(image, (2, 0, 1)).squeeze()
    with open(path, "wb") as fid:
        # header: four little-endian int32s (type=1, height, width, channels)
        fid.write(pack('<4i', 1, height, width, channels))
        image.tofile(fid)
    return
def mvsnet_to_gipuma_dmb(in_path, out_path):
    '''convert mvsnet .pfm output to Gipuma .dmb format'''
    # close the source file deterministically; the original passed an
    # anonymous open() handle to load_pfm and leaked the descriptor
    with open(in_path) as pfm_file:
        image = load_pfm(pfm_file)
    write_gipuma_dmb(out_path, image)
def mvsnet_to_gipuma_cam(in_path, out_path):
    '''convert mvsnet camera to gipuma camera format'''
    # cam is a (2, 4, 4) array: plane 0 = extrinsics, plane 1 = intrinsics
    cam = load_cam(open(in_path))
    # NOTE: cam[0:4][0:4][0] slices the whole array twice, so these are
    # simply cam[0] and cam[1]
    extrinsic = cam[0:4][0:4][0]
    intrinsic = cam[0:4][0:4][1]
    # zero the depth-range row so the intrinsic acts as a pure 4x4 K matrix
    intrinsic[3][0] = 0
    intrinsic[3][1] = 0
    intrinsic[3][2] = 0
    intrinsic[3][3] = 0
    # Gipuma expects the 3x4 projection matrix P = K * [R|t]
    projection_matrix = np.matmul(intrinsic, extrinsic)
    projection_matrix = projection_matrix[0:3][:]
    f = open(out_path, "w")
    for i in range(0, 3):
        for j in range(0, 4):
            f.write(str(projection_matrix[i][j]) + ' ')
        f.write('\n')
    f.write('\n')
    f.close()
    return
def fake_gipuma_normal(in_depth_path, out_normal_path):
    '''fabricate a constant unit normal map, zeroed on invalid (depth<=0) pixels'''
    depth_image = read_gipuma_dmb(in_depth_path)
    rows, cols = np.shape(depth_image)[0], np.shape(depth_image)[1]
    # constant normal (1,1,1)/sqrt(3) at every pixel
    normal_image = np.tile(np.ones((rows, cols, 1)), [1, 1, 3]) / 1.732050808
    # keep normals only where a depth estimate exists
    valid = np.tile(np.reshape(depth_image > 0, (rows, cols, 1)), [1, 1, 3])
    normal_image = np.float32(normal_image * valid)
    write_gipuma_dmb(out_normal_path, normal_image)
    return
def mvsnet_to_gipuma(scan_folder, scan, dtu_test_root, gipuma_point_folder):
    """Convert one scan's MVSNet outputs into the Gipuma/fusibile workspace
    layout: projection matrices in cams/, images in images/, and per-view
    depth + fake-normal .dmb folders. The 49-view loops are DTU-specific.
    """
    image_folder = os.path.join(dtu_test_root, 'Rectified', scan)
    cam_folder = os.path.join(dtu_test_root, 'Cameras')
    depth_folder = os.path.join(scan_folder, 'depth_est')
    gipuma_cam_folder = os.path.join(gipuma_point_folder, 'cams')
    gipuma_image_folder = os.path.join(gipuma_point_folder, 'images')
    if not os.path.isdir(gipuma_point_folder):
        os.mkdir(gipuma_point_folder)
    if not os.path.isdir(gipuma_cam_folder):
        os.mkdir(gipuma_cam_folder)
    if not os.path.isdir(gipuma_image_folder):
        os.mkdir(gipuma_image_folder)
    # convert cameras
    for view in range(0,49):
        in_cam_file = os.path.join(cam_folder, "{:08d}_cam.txt".format(view))
        out_cam_file = os.path.join(gipuma_cam_folder, "{:08d}.png.P".format(view))
        mvsnet_to_gipuma_cam(in_cam_file, out_cam_file)
    # copy images to gipuma image folder
    for view in range(0,49):
        in_image_file = os.path.join(image_folder, "rect_{:03d}_3_r5000.png".format(view+1))# Our image start from 1
        out_image_file = os.path.join(gipuma_image_folder, "{:08d}.png".format(view))
        shutil.copy(in_image_file, out_image_file)
    # convert depth maps and fake normal maps
    gipuma_prefix = '2333__'
    for view in range(0,49):
        sub_depth_folder = os.path.join(gipuma_point_folder, gipuma_prefix+"{:08d}".format(view))
        if not os.path.isdir(sub_depth_folder):
            os.mkdir(sub_depth_folder)
        # uses the probability-filtered depth produced by probability_filter()
        in_depth_pfm = os.path.join(depth_folder, "{:08d}_prob_filtered.pfm".format(view))
        out_depth_dmb = os.path.join(sub_depth_folder, 'disp.dmb')
        fake_normal_dmb = os.path.join(sub_depth_folder, 'normals.dmb')
        mvsnet_to_gipuma_dmb(in_depth_pfm, out_depth_dmb)
        fake_gipuma_normal(out_depth_dmb, fake_normal_dmb)
def probability_filter(scan_folder, prob_threshold):
    """Zero out depth pixels whose confidence is below *prob_threshold* and
    write the result alongside as <view>_prob_filtered.pfm (49 DTU views).
    """
    depth_folder = os.path.join(scan_folder, 'depth_est')
    prob_folder = os.path.join(scan_folder, 'confidence')
    # convert cameras
    for view in range(0,49):
        init_depth_map_path = os.path.join(depth_folder, "{:08d}.pfm".format(view)) # New dataset outputs depth start from 0.
        prob_map_path = os.path.join(prob_folder, "{:08d}.pfm".format(view)) # Same as above
        out_depth_map_path = os.path.join(depth_folder, "{:08d}_prob_filtered.pfm".format(view)) # Gipuma start from 0
        # NOTE(review): these open() handles are never closed explicitly;
        # they are only reclaimed by the garbage collector
        depth_map = load_pfm(open(init_depth_map_path))
        prob_map = load_pfm(open(prob_map_path))
        depth_map[prob_map < prob_threshold] = 0
        write_pfm(out_depth_map_path, depth_map)
def depth_map_fusion(point_folder, fusibile_exe_path, disp_thresh, num_consistent):
    """Invoke the external fusibile binary on a prepared Gipuma workspace.

    :param point_folder: workspace produced by mvsnet_to_gipuma()
    :param fusibile_exe_path: path to the fusibile executable
    :param disp_thresh: disparity consistency threshold passed through
    :param num_consistent: minimum number of consistent views
    """
    cam_folder = os.path.join(point_folder, 'cams')
    image_folder = os.path.join(point_folder, 'images')
    # effectively unbounded depth range and normal threshold: filtering is
    # driven by disp_thresh / num_consistent only
    depth_min = 0.001
    depth_max = 100000
    normal_thresh = 360
    # NOTE(review): command assembled by string concatenation and run via
    # os.system — paths with spaces or shell metacharacters will break;
    # subprocess.run with an argument list would be safer
    cmd = fusibile_exe_path
    cmd = cmd + ' -input_folder ' + point_folder + '/'
    cmd = cmd + ' -p_folder ' + cam_folder + '/'
    cmd = cmd + ' -images_folder ' + image_folder + '/'
    cmd = cmd + ' --depth_min=' + str(depth_min)
    cmd = cmd + ' --depth_max=' + str(depth_max)
    cmd = cmd + ' --normal_thresh=' + str(normal_thresh)
    cmd = cmd + ' --disp_thresh=' + str(disp_thresh)
    cmd = cmd + ' --num_consistent=' + str(num_consistent)
    print (cmd)
    os.system(cmd)
    return
if __name__ == '__main__':
    # Parse args
    parser = argparse.ArgumentParser()
    parser.add_argument('--dtu_test_root', type=str, default = '')
    parser.add_argument('--depth_folder', type=str, default = '')
    parser.add_argument('--out_folder', type=str, default = '')
    parser.add_argument('--fusibile_exe_path', type=str, default = './fusibile')
    # NOTE(review): the three thresholds below declare type=float but pass
    # string defaults; argparse converts them, so this works, but numeric
    # defaults would be cleaner
    parser.add_argument('--prob_threshold', type=float, default = '0.8')
    parser.add_argument('--disp_threshold', type=float, default = '0.13')
    parser.add_argument('--num_consistent', type=float, default = '3')
    args = parser.parse_args()
    dtu_test_root = args.dtu_test_root
    depth_folder = args.depth_folder
    out_folder = args.out_folder
    fusibile_exe_path = args.fusibile_exe_path
    prob_threshold = args.prob_threshold
    disp_threshold = args.disp_threshold
    num_consistent = args.num_consistent
    # Read test list
    testlist = "./scan_list_test.txt"
    with open(testlist) as f:
        scans = f.readlines()
    scans = [line.rstrip() for line in scans]
    # Fusion: for each scan, filter by confidence, convert to Gipuma layout,
    # then run the external fusibile fusion step
    for scan in scans:
        scan_folder = os.path.join(depth_folder, scan)
        fusibile_workspace = os.path.join(depth_folder, out_folder, scan)
        if not os.path.isdir(os.path.join(depth_folder, out_folder)):
            os.mkdir(os.path.join(depth_folder, out_folder))
        if not os.path.isdir(fusibile_workspace):
            os.mkdir(fusibile_workspace)
        # probability filtering
        print ('filter depth map with probability map')
        probability_filter(scan_folder, prob_threshold)
        # convert to gipuma format
        print ('Convert mvsnet output to gipuma input')
        mvsnet_to_gipuma(scan_folder, scan, dtu_test_root, fusibile_workspace)
        # depth map fusion with gipuma
        print ('Run depth map fusion & filter')
        depth_map_fusion(fusibile_workspace, fusibile_exe_path, disp_threshold, num_consistent)
| 8,459 | 33.390244 | 125 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/fusion/preprocess.py
|
#!/usr/bin/env python
"""
Copyright 2019, Yao Yao, HKUST.
Training preprocesses.
"""
from __future__ import print_function
import os
import time
import glob
import random
import math
import re
import sys
import cv2
import numpy as np
import tensorflow as tf
import scipy.io
import urllib
from tensorflow.python.lib.io import file_io
FLAGS = tf.app.flags.FLAGS
def center_image(img):
    """ normalize image input to zero mean and unit variance per channel """
    img = img.astype(np.float32)
    mean = img.mean(axis=(0, 1), keepdims=True)
    std = np.sqrt(img.var(axis=(0, 1), keepdims=True))
    # epsilon guards against division by zero on constant channels
    return (img - mean) / (std + 0.00000001)
def scale_camera(cam, scale=1):
    """ return a copy of *cam* with intrinsics rescaled for a resized image """
    new_cam = np.copy(cam)
    # focal lengths (0,0)/(1,1) and principal point (0,2)/(1,2) scale
    # together with the image resolution
    for row, col in ((0, 0), (1, 1), (0, 2), (1, 2)):
        new_cam[1][row][col] = cam[1][row][col] * scale
    return new_cam
def scale_mvs_camera(cams, scale=1):
    """ rescale the camera of every view (FLAGS.view_num of them) in place """
    for idx in range(FLAGS.view_num):
        cams[idx] = scale_camera(cams[idx], scale=scale)
    return cams
def scale_image(image, scale=1, interpolation='linear'):
    """ resize image using cv2; *interpolation* is 'linear' or 'nearest' """
    if interpolation == 'linear':
        return cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
    if interpolation == 'nearest':
        return cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
    # the original fell through and silently returned None here, which only
    # surfaced later as an opaque AttributeError at the call site
    raise ValueError('unsupported interpolation: {}'.format(interpolation))
def scale_mvs_input(images, cams, depth_image=None, scale=1):
    """ resize every view's image and camera (and optionally the depth map) """
    for idx in range(FLAGS.view_num):
        images[idx] = scale_image(images[idx], scale=scale)
        cams[idx] = scale_camera(cams[idx], scale=scale)
    if depth_image is None:
        return images, cams
    # nearest-neighbour keeps interpolated values as genuine depths
    depth_image = scale_image(depth_image, scale=scale, interpolation='nearest')
    return images, cams, depth_image
def crop_mvs_input(images, cams, depth_image=None):
    """Center-crop images and cameras to fit the network.

    Heights/widths are clamped to FLAGS.max_h / FLAGS.max_w, otherwise
    rounded up to a multiple of FLAGS.base_image_size; the principal point
    is shifted to follow the crop. The depth map (if given) is cropped with
    the reference view (view 0).
    """
    for view in range(FLAGS.view_num):
        h, w = images[view].shape[0:2]
        new_h = h
        new_w = w
        if new_h > FLAGS.max_h:
            new_h = FLAGS.max_h
        else:
            new_h = int(math.ceil(h / FLAGS.base_image_size) * FLAGS.base_image_size)
        if new_w > FLAGS.max_w:
            new_w = FLAGS.max_w
        else:
            new_w = int(math.ceil(w / FLAGS.base_image_size) * FLAGS.base_image_size)
        # Centered crop window.
        start_h = int(math.ceil((h - new_h) / 2))
        start_w = int(math.ceil((w - new_w) / 2))
        finish_h = start_h + new_h
        finish_w = start_w + new_w
        images[view] = images[view][start_h:finish_h, start_w:finish_w]
        # Move the principal point into the cropped frame.
        cams[view][1][0][2] = cams[view][1][0][2] - start_w
        cams[view][1][1][2] = cams[view][1][1][2] - start_h
        # crop depth image (follows the reference view only)
        if depth_image is not None and view == 0:
            depth_image = depth_image[start_h:finish_h, start_w:finish_w]
    if depth_image is not None:
        return images, cams, depth_image
    else:
        return images, cams
def mask_depth_image(depth_image, min_depth, max_depth):
    """Zero out pixels whose depth lies outside [min_depth, max_depth]
    and append a trailing channel axis."""
    # THRESH_TOZERO keeps values above min_depth; THRESH_TOZERO_INV then
    # keeps values at or below max_depth — together a band-pass mask.
    _, masked = cv2.threshold(depth_image, min_depth, 100000, cv2.THRESH_TOZERO)
    _, masked = cv2.threshold(masked, max_depth, 100000, cv2.THRESH_TOZERO_INV)
    return np.expand_dims(masked, 2)
def load_cam(file, interval_scale=1):
    """Parse an MVSNet camera text file into a (2, 4, 4) array.

    cam[0] is the 4x4 extrinsic matrix, cam[1][:3, :3] the 3x3 intrinsics,
    and cam[1][3] the depth sweep (min, interval, sample count, max).
    """
    words = file.read().split()
    cam = np.zeros((2, 4, 4))
    # Words 1..16: the row-major extrinsic matrix (word 0 is its label).
    for row in range(4):
        for col in range(4):
            cam[0][row][col] = words[1 + 4 * row + col]
    # Words 18..26: the row-major intrinsic matrix (word 17 is its label).
    for row in range(3):
        for col in range(3):
            cam[1][row][col] = words[18 + 3 * row + col]
    n_words = len(words)
    if n_words == 29:
        # min depth + interval only; sample count defaults to 1100.
        cam[1][3][0] = words[27]
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = 1100
        cam[1][3][3] = cam[1][3][0] + cam[1][3][1] * cam[1][3][2]
    elif n_words == 30:
        # min depth, interval, and explicit sample count.
        cam[1][3][0] = words[27]
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = words[29]
        cam[1][3][3] = cam[1][3][0] + cam[1][3][1] * cam[1][3][2]
    elif n_words == 31:
        # All four depth fields given explicitly.
        cam[1][3][0] = words[27]
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = words[29]
        cam[1][3][3] = words[30]
    # Otherwise cam[1][3] keeps the np.zeros default.
    return cam
def write_cam(file, cam):
    """Serialize a (2, 4, 4) camera array in the MVSNet camera text format.

    :param file: destination path, opened through TensorFlow's file_io so
        GCS-style paths work as well as local ones.
    :param cam: array as produced by load_cam.
    """
    f = file_io.FileIO(file, "w")
    f.write('extrinsic\n')
    # 4x4 extrinsic matrix, one row per line (each value followed by a space).
    for row in range(4):
        f.write(' '.join(str(cam[0][row][col]) for col in range(4)) + ' \n')
    f.write('\n')
    f.write('intrinsic\n')
    # 3x3 intrinsic matrix, same layout.
    for row in range(3):
        f.write(' '.join(str(cam[1][row][col]) for col in range(3)) + ' \n')
    # Depth sweep line: min, interval, sample count, max.
    f.write(f'\n{cam[1][3][0]} {cam[1][3][1]} {cam[1][3][2]} {cam[1][3][3]}\n')
    f.close()
def load_pfm(file):
    """Read a PFM image from an open binary file object.

    :param file: binary file-like object positioned at the start of a PFM.
    :return: H x W (greyscale) or H x W x 3 (color) array, flipped to
        top-down row order (PFM stores rows bottom-up).
    :raises Exception: on a bad magic number or malformed dimension line.
    """
    header = file.readline().decode('UTF-8').rstrip()
    if header == 'PF':
        color = True
    elif header == 'Pf':
        color = False
    else:
        raise Exception('Not a PFM file.')
    dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('UTF-8'))
    if dim_match:
        width, height = map(int, dim_match.groups())
    else:
        raise Exception('Malformed PFM header.')
    # A negative scale marks little-endian data, positive big-endian.
    scale = float(file.readline().decode('UTF-8').rstrip())
    if scale < 0:  # little-endian
        data_type = '<f'
    else:
        data_type = '>f'  # big-endian
    data_string = file.read()
    # np.fromstring is deprecated (removed in NumPy 2.0); frombuffer is the
    # replacement. Its result is a read-only view, so copy after the flip.
    data = np.frombuffer(data_string, data_type)
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # np.flipud matches the old cv2.flip(data, 0) (vertical flip) and keeps
    # this reader consistent with datasets/utils.read_pfm.
    data = np.flipud(data).copy()
    return data
def write_pfm(file, image, scale=1):
    """Write a float32 image to `file` (a path) in PFM format.

    :param file: destination path, opened through TensorFlow's file_io.
    :param image: float32 array, H x W (greyscale) or H x W x 3 (color).
    :param scale: PFM scale field; written negative for little-endian data.
    :raises Exception: if dtype is not float32 or the shape is unsupported.
    """
    file = file_io.FileIO(file, mode='wb')
    color = None
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    # PFM stores rows bottom-up, so flip vertically before dumping.
    image = np.flipud(image)
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    file.write('PF\n' if color else 'Pf\n')
    file.write('%d %d\n' % (image.shape[1], image.shape[0]))
    endian = image.dtype.byteorder
    # Parenthesized for clarity; `and` already binds tighter than `or`.
    if endian == '<' or (endian == '=' and sys.byteorder == 'little'):
        scale = -scale
    file.write('%f\n' % scale)
    # ndarray.tostring() was removed in NumPy 2.0; tobytes() is the
    # drop-in replacement returning the same raw buffer.
    image_string = image.tobytes()
    file.write(image_string)
    file.close()
def _dtu_resized_sample(cluster_list, image_folder, cam_folder, depth_folder, p, j):
    """Build one sample path list for the resized DTU layout:
    [ref_img, ref_cam, (view_img, view_cam) * (view_num - 1), depth]."""
    paths = []
    # Reference image/camera; pair.txt stores 22 words per reference view.
    ref_index = int(cluster_list[22 * p + 1])
    paths.append(os.path.join(image_folder, ('rect_%03d_%d_r5000.png' % ((ref_index + 1), j))))
    paths.append(os.path.join(cam_folder, ('%08d_cam.txt' % ref_index)))
    # Neighbouring views, best-ranked first.
    for view in range(FLAGS.view_num - 1):
        view_index = int(cluster_list[22 * p + 2 * view + 3])
        paths.append(os.path.join(image_folder, ('rect_%03d_%d_r5000.png' % ((view_index + 1), j))))
        paths.append(os.path.join(cam_folder, ('%08d_cam.txt' % view_index)))
    # Ground-truth depth of the reference view.
    paths.append(os.path.join(depth_folder, ('depth_map_%04d.pfm' % ref_index)))
    return paths


def gen_dtu_resized_path(dtu_data_folder, mode='training'):
    """Generate data paths for the resized DTU dataset.

    :param dtu_data_folder: root with Cameras/, Rectified/, Depths/.
    :param mode: 'training' (all 7 lightings) or 'validation' (lighting 3).
    :return: list of per-sample path lists (see _dtu_resized_sample).
    """
    sample_list = []
    # parse camera pairs
    cluster_file_path = dtu_data_folder + '/Cameras/pair.txt'
    cluster_list = file_io.FileIO(cluster_file_path, mode='r').read().split()
    # Scan index splits used by the original authors.
    training_set = [2, 6, 7, 8, 14, 16, 18, 19, 20, 22, 30, 31, 36, 39, 41, 42, 44,
                    45, 46, 47, 50, 51, 52, 53, 55, 57, 58, 60, 61, 63, 64, 65, 68, 69, 70, 71, 72,
                    74, 76, 83, 84, 85, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
                    101, 102, 103, 104, 105, 107, 108, 109, 111, 112, 113, 115, 116, 119, 120,
                    121, 122, 123, 124, 125, 126, 127, 128]
    validation_set = [3, 5, 17, 21, 28, 35, 37, 38, 40, 43, 56, 59, 66, 67, 82, 86, 106, 117]
    data_set = []
    if mode == 'training':
        data_set = training_set
    elif mode == 'validation':
        data_set = validation_set
    for i in data_set:
        image_folder = os.path.join(dtu_data_folder, ('Rectified/scan%d_train' % i))
        cam_folder = os.path.join(dtu_data_folder, 'Cameras/train')
        depth_folder = os.path.join(dtu_data_folder, ('Depths/scan%d_train' % i))
        # Training sweeps all 7 lightings; validation uses lighting 3 only.
        lightings = range(0, 7) if mode == 'training' else [3]
        for j in lightings:
            for p in range(0, int(cluster_list[0])):
                sample_list.append(
                    _dtu_resized_sample(cluster_list, image_folder, cam_folder, depth_folder, p, j))
    return sample_list
def _dtu_mvs_sample(cluster_list, image_folder, cam_folder, depth_folder, p, j):
    """Build one sample path list for the original DTU layout:
    [ref_img, ref_cam, (view_img, view_cam) * (view_num - 1), depth]."""
    paths = []
    # Reference image/camera; pair.txt stores 22 words per reference view.
    ref_index = int(cluster_list[22 * p + 1])
    paths.append(os.path.join(image_folder, ('rect_%03d_%d_r5000.png' % ((ref_index + 1), j))))
    paths.append(os.path.join(cam_folder, ('%08d_cam.txt' % ref_index)))
    # Neighbouring views, best-ranked first.
    for view in range(FLAGS.view_num - 1):
        view_index = int(cluster_list[22 * p + 2 * view + 3])
        paths.append(os.path.join(image_folder, ('rect_%03d_%d_r5000.png' % ((view_index + 1), j))))
        paths.append(os.path.join(cam_folder, ('%08d_cam.txt' % view_index)))
    # Ground-truth depth of the reference view.
    paths.append(os.path.join(depth_folder, ('depth_map_%04d.pfm' % ref_index)))
    return paths


def gen_dtu_mvs_path(dtu_data_folder, mode='training'):
    """Generate data paths for the DTU dataset.

    :param dtu_data_folder: root with Cameras/, Rectified/, Depths/.
    :param mode: 'training' (all 7 lightings), 'validation' or
        'evaluation' (both use lighting 5 only).
    :return: list of per-sample path lists (see _dtu_mvs_sample).
    """
    sample_list = []
    # parse camera pairs; close the handle deterministically (the previous
    # code leaked the open file object).
    cluster_file_path = dtu_data_folder + '/Cameras/pair.txt'
    with open(cluster_file_path) as cluster_file:
        cluster_list = cluster_file.read().split()
    # Scan index splits used by the original authors.
    training_set = [2, 6, 7, 8, 14, 16, 18, 19, 20, 22, 30, 31, 36, 39, 41, 42, 44,
                    45, 46, 47, 50, 51, 52, 53, 55, 57, 58, 60, 61, 63, 64, 65, 68, 69, 70, 71, 72,
                    74, 76, 83, 84, 85, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
                    101, 102, 103, 104, 105, 107, 108, 109, 111, 112, 113, 115, 116, 119, 120,
                    121, 122, 123, 124, 125, 126, 127, 128]
    validation_set = [3, 5, 17, 21, 28, 35, 37, 38, 40, 43, 56, 59, 66, 67, 82, 86, 106, 117]
    evaluation_set = [1, 4, 9, 10, 11, 12, 13, 15, 23, 24, 29, 32, 33, 34, 48, 49, 62, 75, 77,
                      110, 114, 118]
    data_set = []
    if mode == 'training':
        data_set = training_set
    elif mode == 'validation':
        data_set = validation_set
    elif mode == 'evaluation':
        data_set = evaluation_set
    for i in data_set:
        image_folder = os.path.join(dtu_data_folder, ('Rectified/scan%d' % i))
        cam_folder = os.path.join(dtu_data_folder, 'Cameras')
        depth_folder = os.path.join(dtu_data_folder, ('Depths/scan%d' % i))
        # Training sweeps all 7 lightings; other modes use lighting 5 only.
        lightings = range(0, 7) if mode == 'training' else [5]
        for j in lightings:
            for p in range(0, int(cluster_list[0])):
                sample_list.append(
                    _dtu_mvs_sample(cluster_list, image_folder, cam_folder, depth_folder, p, j))
    return sample_list
def gen_mvs_list(mode='training'):
    """Collect all MVS sample paths for the configured datasets.

    :return: list of [I1_path, C1_path, I2_path, C2_path, ...(, D1_path)]
        entries; currently only DTU is wired in (via FLAGS.train_dtu).
    """
    sample_list = []
    if FLAGS.train_dtu:
        sample_list.extend(gen_dtu_mvs_path(FLAGS.dtu_data_root, mode=mode))
    return sample_list
# for testing
def gen_pipeline_mvs_list(dense_folder):
    """Build the test-time MVS input list from a dense folder's pair.txt.

    :param dense_folder: folder containing images/, cams/, pair.txt.
    :return: list of [ref_img, ref_cam, view_img, view_cam, ...] path lists.
    """
    image_folder = os.path.join(dense_folder, 'images')
    cam_folder = os.path.join(dense_folder, 'cams')
    cluster_list_path = os.path.join(dense_folder, 'pair.txt')
    cluster_list = open(cluster_list_path).read().split()
    mvs_list = []
    # pair.txt layout: total count, then per reference view its index,
    # its neighbour count, and (neighbour_index, score) pairs.
    cursor = 1
    for _ in range(int(cluster_list[0])):
        ref_index = int(cluster_list[cursor])
        cursor += 1
        paths = [
            os.path.join(image_folder, ('%08d.jpg' % ref_index)),
            os.path.join(cam_folder, ('%08d_cam.txt' % ref_index)),
        ]
        all_view_num = int(cluster_list[cursor])
        cursor += 1
        # Take at most view_num - 1 neighbours; scores are skipped by
        # stepping two words per neighbour.
        for view in range(min(FLAGS.view_num - 1, all_view_num)):
            view_index = int(cluster_list[cursor + 2 * view])
            paths.append(os.path.join(image_folder, ('%08d.jpg' % view_index)))
            paths.append(os.path.join(cam_folder, ('%08d_cam.txt' % view_index)))
        cursor += 2 * all_view_num
        mvs_list.append(paths)
    return mvs_list
| 17,048 | 37.572398 | 122 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/models/Module.py
|
# -*- coding: utf-8 -*-
# @Time : 2020/6/18 0018 20:57
# @Author : Anzhu Yu
# @Site :
# @File : module.py
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def homo_warping(src_feature, ref_in, src_in, ref_ex, src_ex, depth_hypos):
    """Warp a source-view feature map into the reference view by plane sweep.

    :param src_feature: source feature map [B, C, H, W]
    :param ref_in, src_in: intrinsic matrices [B, 3, 3]
    :param ref_ex, src_ex: extrinsic matrices (at least [B, 3, 4] rows used)
    :param depth_hypos: fronto-parallel depth hypotheses [B, Ndepth]
    :return: warped source features [B, C, Ndepth, H, W]
    """
    batch, channels = src_feature.shape[0], src_feature.shape[1]
    num_depth = depth_hypos.shape[1]
    height, width = src_feature.shape[2], src_feature.shape[3]
    with torch.no_grad():
        # 3x4 projections K[R|t] for both views, padded to square 4x4.
        src_proj = torch.matmul(src_in, src_ex[:, 0:3, :])
        ref_proj = torch.matmul(ref_in, ref_ex[:, 0:3, :])
        # NOTE(review): hard .cuda() here assumes all inputs live on the GPU.
        last = torch.tensor([[[0, 0, 0, 1.0]]]).repeat(len(src_in), 1, 1).cuda()
        src_proj = torch.cat((src_proj, last), 1)
        ref_proj = torch.cat((ref_proj, last), 1)
        # Relative transform taking reference-camera rays to source pixels.
        proj = torch.matmul(src_proj, torch.inverse(ref_proj))
        rot = proj[:, :3, :3]  # [B,3,3]
        trans = proj[:, :3, 3:4]  # [B,3,1]
        # Homogeneous pixel grid of the reference view, flattened to [3, H*W].
        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=src_feature.device),
                               torch.arange(0, width, dtype=torch.float32, device=src_feature.device)])
        y, x = y.contiguous(), x.contiguous()
        y, x = y.view(height * width), x.view(height * width)
        xyz = torch.stack((x, y, torch.ones_like(x)))  # [3, H*W]
        xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)  # [B, 3, H*W]
        rot_xyz = torch.matmul(rot, xyz)  # [B, 3, H*W]
        # Scale each rotated ray by every depth hypothesis, then translate.
        rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_hypos.view(batch, 1, num_depth,
                                                                                           1)  # [B, 3, Ndepth, H*W]
        proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)  # [B, 3, Ndepth, H*W]
        # Perspective divide, then normalise to [-1, 1] for grid_sample.
        proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]  # [B, 2, Ndepth, H*W]
        proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1
        proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
        proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)  # [B, Ndepth, H*W, 2]
        grid = proj_xy
    # Bilinear sampling; out-of-view samples read as zero.
    warped_src_fea = F.grid_sample(src_feature, grid.view(batch, num_depth * height, width, 2), mode='bilinear',
                                   padding_mode='zeros')
    warped_src_fea = warped_src_fea.view(batch, channels, num_depth, height, width)
    return warped_src_fea
def depth_regression(p, depth_values):
    """Soft-argmax depth: expectation of `depth_values` under `p`.

    :param p: probability volume [B, D, H, W]
    :param depth_values: discrete depth values [B, D]
    :return: expected depth map [B, H, W]
    """
    # Broadcast the per-batch depth values over the spatial dimensions.
    weights = depth_values.view(*depth_values.shape, 1, 1)
    return torch.sum(p * weights, 1)
# Self-attention layer
class AttentionConv(nn.Module):
    """Local self-attention layer over kernel_size x kernel_size windows
    with learned relative positional encodings (rel_h / rel_w), followed
    by a LeakyReLU activation like the other conv layers in this file.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, groups=1, bias=False):
        super(AttentionConv, self).__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.groups = groups
        # make sure that out_channels = 0 (mod groups)
        assert self.out_channels % self.groups == 0, "ERROR INPUT,CHECK AGAIN!"
        # Relative positional encodings, split half for rows, half for columns.
        self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1, kernel_size, 1), requires_grad=True)
        self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1, kernel_size), requires_grad=True)
        # 1x1 projections producing keys, queries and values.
        self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.reset_parameters()

    def forward(self, x):
        """Attend over each pixel's padded local window; returns [B, out_channels, H, W]."""
        batch, channels, height, width = x.size()
        # Pad so every output pixel has a full kernel_size window of keys/values.
        padded_x = F.pad(x, [self.padding, self.padding, self.padding, self.padding])
        # Learned transformation.
        q_out = self.query_conv(x)
        k_out = self.key_conv(padded_x)
        v_out = self.value_conv(padded_x)
        # Unfold spatial dims into sliding windows: [..., H, W, k, k].
        k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
        v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
        # Add the relative positional encodings to the keys.
        k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
        k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
        # Collapse each k x k window into one attention axis (last dim).
        k_out = k_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        q_out = q_out.view(batch, self.groups, self.out_channels // self.groups, height, width, 1)
        # Scaled-dot-product-style attention (no scaling factor here).
        out = q_out * k_out
        out = F.softmax(out, dim=-1)
        out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch, -1, height, width)
        # Activation here. The same with all the other conv layers.
        return nn.LeakyReLU(0.1)(out)

    def reset_parameters(self):
        """He-initialize the projections; standard-normal positional encodings."""
        init.kaiming_normal_(self.key_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.value_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.query_conv.weight, mode='fan_out', nonlinearity='relu')
        init.normal_(self.rel_h, 0, 1)
        init.normal_(self.rel_w, 0, 1)
## General convolution
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """2-D convolution (with bias) followed by LeakyReLU(0.1), as one Sequential."""
    convolution = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                            padding=padding, dilation=dilation, bias=True)
    return nn.Sequential(convolution, nn.LeakyReLU(0.1))
# Feature Extraction
class FeaturePyramid(nn.Module):
    """Shared-weight feature extractor applied at several image scales.

    The same conv chain (64 -> 32 -> 16 channels, ending in local
    self-attention) runs on the input image and on each successive
    2x-downsampled copy, yielding a list of feature maps coarse-last.
    """

    def __init__(self, num_heads=1):
        super(FeaturePyramid, self).__init__()
        self.conv0aa = conv(3, 64, kernel_size=3, stride=1)
        self.conv0ba = conv(64, 64, kernel_size=3, stride=1)
        self.conv0bb = conv(64, 64, kernel_size=3, stride=1)
        self.conv0bc = conv(64, 32, kernel_size=3, stride=1)
        self.conv0bd = conv(32, 32, kernel_size=3, stride=1)
        self.conv0be = conv(32, 32, kernel_size=3, stride=1)
        self.conv0bf = conv(32, 16, kernel_size=3, stride=1)
        self.conv0bg = conv(16, 16, kernel_size=3, stride=1)
        self.conv0bh = AttentionConv(16, 16, kernel_size=3, stride=1, groups=num_heads)

    def _extract(self, x):
        """Run the fixed conv chain ending in self-attention on one scale."""
        x = self.conv0aa(x)
        for layer in (self.conv0ba, self.conv0bb, self.conv0bc, self.conv0bd,
                      self.conv0be, self.conv0bf, self.conv0bg, self.conv0bh):
            x = layer(x)
        return x

    def forward(self, img, scales=5):
        pyramid = [self._extract(img)]
        for _ in range(scales - 1):
            # Halve the resolution, detaching so no gradient flows through
            # the resampling of the input image.
            img = nn.functional.interpolate(img, scale_factor=0.5, mode='bilinear',
                                            align_corners=None).detach()
            pyramid.append(self._extract(img))
        return pyramid
def conditionIntrinsics(intrinsics, img_shape, fp_shapes):
    """Scale intrinsics to match each feature-pyramid level's resolution.

    :param intrinsics: [B, 3, 3] intrinsic matrices at full image resolution.
    :param img_shape: shape of the input image (index 2 = height).
    :param fp_shapes: per-level feature map shapes (index 2 = height).
    :return: [B, nScale, 3, 3] tensor of per-level intrinsics.
    """
    per_level = []
    for fp_shape in fp_shapes:
        # Downsample ratio of this pyramid level, measured on the height axis.
        ratio = img_shape[2] / fp_shape[2]
        K = intrinsics.clone()
        # Only the first two rows (fx, fy, cx, cy) scale with resolution.
        K[:, :2, :] = K[:, :2, :] / ratio
        per_level.append(K)
    return torch.stack(per_level).permute(1, 0, 2, 3)  # [B, nScale, 3, 3]
def calInitDepthInterval(ref_in, src_in, ref_ex, src_ex, pixel_interval):
    """Hard-coded mean depth interval for DTU (4-1 interval setting).

    The camera arguments are accepted for interface compatibility but
    are not used by the current implementation.
    """
    return 165  # The mean depth interval calculated on 4-1 interval setting...
def calSweepingDepthHypo(ref_in, src_in, ref_ex, src_ex, depth_min, depth_max, nhypothesis_init=48):
    """Initial fronto-parallel plane-sweep depth hypotheses.

    Sample 0's depth range is applied to the whole batch, matching the
    original "depth range is consistent in one batch" assumption (the old
    per-sample recomputation was dead code and has been removed).

    :param depth_min, depth_max: per-sample depth bounds (indexable, len B).
    :return: [B, nhypothesis_init] hypothesis tensor on the GPU.
    """
    batchSize = ref_in.shape[0]
    # Make sure the number of depth hypothesis has a factor of 2
    assert nhypothesis_init % 2 == 0
    # torch.linspace replaces the deprecated torch.range and yields exactly
    # nhypothesis_init evenly spaced samples with both endpoints included.
    hypos = torch.linspace(float(depth_min[0]), float(depth_max[0]), nhypothesis_init)
    depth_hypos = hypos.unsqueeze(0).repeat(batchSize, 1)
    return depth_hypos.cuda()
def calDepthHypo(netArgs, ref_depths, ref_intrinsics, src_intrinsics, ref_extrinsics, src_extrinsics, depth_min,
                 depth_max, level):
    """Per-pixel depth hypotheses for the refinement levels.

    In "train" mode a fixed DTU interval is used. Otherwise the interval is
    derived geometrically per batch sample so that adjacent hypotheses move
    the reprojection in the first source view by about `pixel_interval`
    pixels, and 2*d hypotheses are centred on the upsampled depth map.

    :return: depth hypothesis volume [B, 2*d, H, W] (float32).
    """
    ## Calculate depth hypothesis maps for refine steps
    # These two parameters determining the depth searching range and interval at finer level.
    # For experiments on other datasets, the pixel_interval could be modified accordingly to get better results.
    d = 4
    pixel_interval = 1
    nBatch = ref_depths.shape[0]
    height = ref_depths.shape[1]
    width = ref_depths.shape[2]
    # Hard code the interval for training on DTU with 1 level of refinement.
    # This depth interval is estimated by J.Yang for training boosting.
    # Uncomment this part if other dataset is used.
    if netArgs.mode == "train":
        depth_interval = torch.tensor(
            [6.8085] * nBatch).cuda()
        depth_hypos = ref_depths.unsqueeze(1).repeat(1, d * 2, 1, 1)
        # Offsets -d..d-1 around the upsampled depth, in fixed-interval steps.
        for depth_level in range(-d, d):
            depth_hypos[:, depth_level + d, :, :] += (depth_level) * depth_interval[0]
        return depth_hypos
    with torch.no_grad():
        ref_depths = ref_depths
        # Double precision: the matrix inversions below are ill-conditioned.
        ref_intrinsics = ref_intrinsics.double()
        src_intrinsics = src_intrinsics.squeeze(1).double()
        ref_extrinsics = ref_extrinsics.double()
        src_extrinsics = src_extrinsics.squeeze(1).double()
        interval_maps = []
        depth_hypos = ref_depths.unsqueeze(1).repeat(1, d * 2, 1, 1)
        for batch in range(nBatch):
            xx, yy = torch.meshgrid([torch.arange(0, width).cuda(), torch.arange(0, height).cuda()])
            xxx = xx.reshape([-1]).double()
            yyy = yy.reshape([-1]).double()
            # Homogeneous pixel coordinates of the reference view.
            X = torch.stack([xxx, yyy, torch.ones_like(xxx)], dim=0)
            D1 = torch.transpose(ref_depths[batch, :, :], 0, 1).reshape(
                [-1])  # Transpose before reshape to produce identical results to numpy and matlab version.
            D2 = D1 + 1
            # Back-project each pixel at depth D1 and D1+1 to world space.
            X1 = X * D1
            X2 = X * D2
            ray1 = torch.matmul(torch.inverse(ref_intrinsics[batch]), X1)
            ray2 = torch.matmul(torch.inverse(ref_intrinsics[batch]), X2)
            X1 = torch.cat([ray1, torch.ones_like(xxx).unsqueeze(0).double()], dim=0)
            X1 = torch.matmul(torch.inverse(ref_extrinsics[batch]), X1)
            X2 = torch.cat([ray2, torch.ones_like(xxx).unsqueeze(0).double()], dim=0)
            X2 = torch.matmul(torch.inverse(ref_extrinsics[batch]), X2)
            # Project both points into the first source view.
            X1 = torch.matmul(src_extrinsics[batch][0], X1)
            X2 = torch.matmul(src_extrinsics[batch][0], X2)
            X1 = X1[:3]
            X1 = torch.matmul(src_intrinsics[batch][0], X1)
            X1_d = X1[2].clone()
            X1 /= X1_d
            X2 = X2[:3]
            X2 = torch.matmul(src_intrinsics[batch][0], X2)
            X2_d = X2[2].clone()
            X2 /= X2_d
            # Epipolar line direction in the source image; step one
            # pixel_interval along it to define the target displacement.
            k = (X2[1] - X1[1]) / (X2[0] - X1[0])
            b = X1[1] - k * X1[0]
            theta = torch.atan(k)
            X3 = X1 + torch.stack(
                [torch.cos(theta) * pixel_interval, torch.sin(theta) * pixel_interval, torch.zeros_like(X1[2, :])],
                dim=0)
            # Solve for the depth change that produces that pixel shift.
            A = torch.matmul(ref_intrinsics[batch], ref_extrinsics[batch][:3, :3])
            tmp = torch.matmul(src_intrinsics[batch][0], src_extrinsics[batch][0, :3, :3])
            A = torch.matmul(A, torch.inverse(tmp))
            tmp1 = X1_d * torch.matmul(A, X1)
            tmp2 = torch.matmul(A, X3)
            M1 = torch.cat([X.t().unsqueeze(2), tmp2.t().unsqueeze(2)], axis=2)[:, 1:, :]
            M2 = tmp1.t()[:, 1:]
            ans = torch.matmul(torch.inverse(M1), M2.unsqueeze(2))
            delta_d = ans[:, 0, 0]
            # One scalar interval (mean over pixels), broadcast over H x W.
            interval_maps = torch.abs(delta_d).mean().repeat(ref_depths.shape[2], ref_depths.shape[1]).t()
            for depth_level in range(-d, d):
                depth_hypos[batch, depth_level + d, :, :] += depth_level * interval_maps
    return depth_hypos.float()  # Return the depth hypothesis map from statistical interval setting.
def depth_regression_refine(prob_volume, depth_hypothesis):
    """Expected depth from per-pixel probabilities and per-pixel hypotheses.

    :param prob_volume: probability volume [B, D, H, W]
    :param depth_hypothesis: per-pixel depth hypotheses [B, D, H, W]
    :return: expected depth map [B, H, W]
    """
    return (prob_volume * depth_hypothesis).sum(dim=1)
def proj_cost_AACVP(Group, settings, ref_feature, src_feature, level, ref_in, src_in, ref_ex, src_ex, depth_hypos):
    """Group-wise correlation cost volume at one refinement level (AACVP).

    Each source view's features are homography-warped to the reference view
    at the per-pixel depth hypotheses, accumulated, then correlated with the
    reference features group-wise and averaged over source views.

    :param depth_hypos: per-pixel hypotheses [B, Ndepth, H, W]
    :return: cost volume [B, Group, Ndepth, H, W]
    """
    ## Calculate the cost volume for refined depth hypothesis selection
    # AACVP Version.
    batch, channels = ref_feature.shape[0], ref_feature.shape[1]
    num_depth = depth_hypos.shape[1]
    height, width = ref_feature.shape[2], ref_feature.shape[3]
    B, C, H, W = ref_feature.shape
    volume_sum = ref_feature.unsqueeze(2).repeat(1, 1, num_depth, 1, 1)
    ref_volume = volume_sum
    # Split channels into correlation groups: [B, G, C//G, D, H, W].
    ref_volume = ref_volume.view(B, Group, C // Group, *ref_volume.shape[-3:])
    volume_sum = 0
    for src in range(settings.nsrc):
        with torch.no_grad():
            # Relative projection from reference to this source view.
            src_proj = torch.matmul(src_in[:, src, :, :], src_ex[:, src, 0:3, :])
            ref_proj = torch.matmul(ref_in, ref_ex[:, 0:3, :])
            # NOTE(review): hard .cuda() assumes all inputs are on the GPU.
            last = torch.tensor([[[0, 0, 0, 1.0]]]).repeat(len(src_in), 1, 1).cuda()
            src_proj = torch.cat((src_proj, last), 1)
            ref_proj = torch.cat((ref_proj, last), 1)
            proj = torch.matmul(src_proj, torch.inverse(ref_proj))
            rot = proj[:, :3, :3]
            trans = proj[:, :3, 3:4]
            # Reference pixel grid in homogeneous coordinates, [B, 3, H*W].
            y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=ref_feature.device),
                                   torch.arange(0, width, dtype=torch.float32, device=ref_feature.device)])
            y, x = y.contiguous(), x.contiguous()
            y, x = y.view(height * width), x.view(height * width)
            xyz = torch.stack((x, y, torch.ones_like(x)))
            xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)
            rot_xyz = torch.matmul(rot, xyz)
            # Unlike homo_warping, hypotheses here are per pixel (H*W axis).
            rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_hypos.view(batch, 1, num_depth,
                                                                                               height * width)  # [B, 3, Ndepth, H*W]
            proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)
            # Perspective divide and normalisation to [-1, 1] for grid_sample.
            proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]
            proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1
            proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
            proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)
            grid = proj_xy
        warped_src_fea = F.grid_sample(src_feature[src][level], grid.view(batch, num_depth * height, width, 2),
                                       mode='bilinear',
                                       padding_mode='zeros')
        warped_src_fea = warped_src_fea.view(batch, channels, num_depth, height, width)
        warped_src_fea = warped_src_fea.to(ref_volume.dtype)
        warped_src_fea = warped_src_fea.view(*ref_volume.shape)
        if settings.mode == 'training':
            volume_sum = volume_sum + warped_src_fea  # (B, Group, C//Group, D, h, w)
        else:
            # In-place accumulation at test time to save memory.
            volume_sum += warped_src_fea
        del warped_src_fea
    # Group-wise correlation, averaged over channels per group and views.
    volume_variance = (volume_sum * ref_volume).mean(2).div_(settings.nsrc)  # (B, Group, D, h, w)
    del volume_sum, ref_volume
    return volume_variance
| 16,022 | 44.649573 | 133 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/models/__init__.py
|
# encoding:UTF-8
from models.AACVPMVSNet import sL1_loss,MSE_loss, AACVPMVSNet
| 79 | 25.666667 | 61 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/models/AACVPMVSNet.py
|
# -*- coding: utf-8 -*-
# @Time : 2020/6/18 0018 20:57
# @Author : Anzhu Yu
# @Site :
# @File : AACVPMVSNet.py
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F
from .Module import *
class ConvBnReLU3D(nn.Module):
    """3-D convolution -> BatchNorm3d -> in-place ReLU building block."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super(ConvBnReLU3D, self).__init__()
        # Bias is redundant before batch normalization, so it is disabled.
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm3d(out_channels)

    def forward(self, x):
        y = self.conv(x)
        y = self.bn(y)
        return F.relu(y, inplace=True)
class CostRegNetAACVP(nn.Module):
    """Cost-volume regularization network: a small 3-D encoder/decoder
    with skip connections that maps a [B, in_channels, D, H, W] cost
    volume to per-depth logits [B, D, H, W]."""

    def __init__(self, in_channels):
        super(CostRegNetAACVP, self).__init__()
        # Encoder: 16 -> 32 (stride-2 downsample) -> 64 channels.
        self.conv0 = ConvBnReLU3D(in_channels, 16, kernel_size=3, padding=1)
        self.conv0a = ConvBnReLU3D(16, 16, kernel_size=3, padding=1)
        self.conv1 = ConvBnReLU3D(16, 32, stride=2, kernel_size=3, padding=1)
        self.conv2 = ConvBnReLU3D(32, 32, kernel_size=3, padding=1)
        self.conv2a = ConvBnReLU3D(32, 32, kernel_size=3, padding=1)
        self.conv3 = ConvBnReLU3D(32, 64, kernel_size=3, padding=1)
        self.conv4 = ConvBnReLU3D(64, 64, kernel_size=3, padding=1)
        self.conv4a = ConvBnReLU3D(64, 64, kernel_size=3, padding=1)
        # Decoder: transposed convs; skips are added in forward().
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, kernel_size=3, padding=1, output_padding=0, stride=1, bias=False),
            nn.BatchNorm3d(32),
            nn.ReLU(inplace=True))
        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(32, 16, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(16),
            nn.ReLU(inplace=True))
        # Final 1-channel projection producing the per-depth score.
        self.prob0 = nn.Conv3d(16, 1, 3, stride=1, padding=1)

    def forward(self, x):
        skip0 = self.conv0a(self.conv0(x))
        skip2 = self.conv2a(self.conv2(self.conv1(skip0)))
        bottleneck = self.conv4a(self.conv4(self.conv3(skip2)))
        up = skip2 + self.conv5(bottleneck)
        up = skip0 + self.conv6(up)
        return self.prob0(up).squeeze(1)
def sL1_loss(depth_est, depth_gt, mask):
    """Smooth-L1 loss between estimate and ground truth over valid pixels only."""
    valid_est = depth_est[mask]
    valid_gt = depth_gt[mask]
    return F.smooth_l1_loss(valid_est, valid_gt, reduction='mean')
def MSE_loss(depth_est, depth_gt, mask):
    """Mean-squared-error loss between estimate and ground truth over valid pixels.

    `reduction='mean'` replaces the deprecated `size_average=True` keyword
    (removed from newer PyTorch releases); the computed value is identical.
    """
    return F.mse_loss(depth_est[mask], depth_gt[mask], reduction='mean')
# Here is the network
class AACVPMVSNet(nn.Module):
    """AACVP-MVSNet: coarse-to-fine multi-view stereo network combining
    self-attention feature extraction with group-wise correlation cost
    volumes regularized by a shared 3-D U-Net.

    forward() returns a dict with:
      - "depth_est_list": depth maps, finest scale first, and
      - "prob_confidence": per-pixel photometric confidence at the
        finest level.
    """

    def __init__(self, args, group=4, num_heads=1):
        super(AACVPMVSNet, self).__init__()
        self.featurePyramid = FeaturePyramid(num_heads=num_heads)
        self.args = args
        # Number of correlation groups; also the cost-reg input channels.
        self.Group = group
        self.cost_reg_refine = CostRegNetAACVP(in_channels=self.Group)

    def forward(self, ref_img, src_imgs, ref_in, src_in, ref_ex, src_ex, depth_min, depth_max):
        """Estimate depth for the reference view from `self.args.nsrc` sources.

        :param ref_img: reference image [B, 3, H, W]
        :param src_imgs: source images [B, nsrc, 3, H, W]
        :param ref_in / src_in: intrinsics; ref_ex / src_ex: extrinsics
        :param depth_min, depth_max: per-sample sweep bounds
        """
        # initialization
        depth_est_list = []
        output = {}
        # Step 1: Feature extraction. Self-attention is used here.
        ref_feature_pyramid = self.featurePyramid(ref_img, self.args.nscale)
        src_feature_pyramids = []
        for i in range(self.args.nsrc):
            src_feature_pyramids.append(self.featurePyramid(src_imgs[:, i, :, :, :], self.args.nscale))
        # in. and ex. matrices: intrinsics rescaled to each pyramid level.
        ref_in_multiscales = conditionIntrinsics(ref_in, ref_img.shape,
                                                 [feature.shape for feature in ref_feature_pyramid])
        src_in_multiscales = []
        for i in range(self.args.nsrc):
            src_in_multiscales.append(conditionIntrinsics(src_in[:, i], ref_img.shape,
                                                          [feature.shape for feature in src_feature_pyramids[i]]))
        src_in_multiscales = torch.stack(src_in_multiscales).permute(1, 0, 2, 3, 4)
        # Step 2: estimate the depth map at the coarsest level.
        # nhypothesis = 48 for DTU Dataset as default.
        depth_hypos = calSweepingDepthHypo(ref_in_multiscales[:, -1], src_in_multiscales[:, 0, -1], ref_ex, src_ex,
                                           depth_min, depth_max, nhypothesis_init=48)
        # Step 3: Cost Volume Pyramid calculated here.
        ref_volume = ref_feature_pyramid[-1].unsqueeze(2).repeat(1, 1, len(depth_hypos[0]), 1, 1)
        B, C, H, W = src_feature_pyramids[0][0].shape
        V = self.args.nsrc
        # Kwea3 implementation as reference
        # Split channels into correlation groups: [B, G, C//G, D, h, w].
        ref_volume = ref_volume.view(B, self.Group, C // self.Group, *ref_volume.shape[-3:])
        volume_sum = 0
        warp_volumes = None
        for src_idx in range(self.args.nsrc):
            # warpped features
            warped_volume = homo_warping(src_feature_pyramids[src_idx][-1], ref_in_multiscales[:, -1],
                                         src_in_multiscales[:, src_idx, -1, :, :],
                                         ref_ex, src_ex[:, src_idx], depth_hypos)
            ## regular solution
            warped_volume = warped_volume.view(*ref_volume.shape)
            if self.args.mode == "train":
                # (B, Groups, C//Groups, D, h, w)
                volume_sum = volume_sum + warped_volume
            else:
                # In-place accumulation at test time to save memory.
                volume_sum += warped_volume
            del warped_volume
        ## Aggregate multiple feature volumes by Similarity
        ## The parameter V is a little different with that in implementation of Kwea123
        ## V = nsrc here, while V denotes the quantity of all the input images in the implementation of Kwea123.
        cost_volume = (volume_sum * ref_volume).mean(2).div_(V)
        # Step 4: Estimate the Prob.
        cost_reg = self.cost_reg_refine(cost_volume).squeeze(1)
        # Release the GPU burden.
        if self.args.mode == "test":
            del volume_sum
            del ref_volume
            del warp_volumes
        prob_volume = F.softmax(cost_reg, dim=1)
        depth = depth_regression(prob_volume, depth_values=depth_hypos)
        depth_est_list.append(depth)
        # Step 5: Estimate the residual at each level (coarse to fine).
        for level in range(self.args.nscale - 2, -1, -1):
            # Upsample the previous level's depth by 2x.
            depth_up = nn.functional.interpolate(depth[None, :], size=None, scale_factor=2, mode='bicubic',
                                                 align_corners=None)
            depth_up = depth_up.squeeze(0)
            # Per-pixel hypotheses centred on the upsampled depth.
            depth_hypos = calDepthHypo(self.args, depth_up, ref_in_multiscales[:, level, :, :],
                                       src_in_multiscales[:, :, level, :, :], ref_ex, src_ex, depth_min, depth_max,
                                       level)
            cost_volume = proj_cost_AACVP(Group=self.Group, settings=self.args, ref_feature=ref_feature_pyramid[level],
                                          src_feature=src_feature_pyramids,
                                          level=level, ref_in=ref_in_multiscales[:, level, :, :],
                                          src_in=src_in_multiscales[:, :, level, :, :], ref_ex=ref_ex,
                                          src_ex=src_ex[:, :], depth_hypos=depth_hypos)
            cost_reg2 = self.cost_reg_refine(cost_volume).squeeze(1)
            if self.args.mode == "test":
                del cost_volume
            prob_volume = F.softmax(cost_reg2, dim=1)
            if self.args.mode == "test":
                del cost_reg2
            # Depth regression
            depth = depth_regression_refine(prob_volume, depth_hypos)
            depth_est_list.append(depth)
        # Step 6: Get the final result (confidence at the finest level).
        with torch.no_grad():
            num_depth = prob_volume.shape[1]
            # Probability mass over a 4-hypothesis window around the argmax.
            prob_volume_sum4 = 4 * F.avg_pool3d(F.pad(prob_volume.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)), (4, 1, 1),
                                                stride=1, padding=0).squeeze(1)
            depth_index = depth_regression(prob_volume, depth_values=torch.arange(num_depth, device=prob_volume.device,
                                                                                 dtype=torch.float)).long()
            prob_confidence = torch.gather(prob_volume_sum4, 1, depth_index.unsqueeze(1)).squeeze(1)
        if self.args.mode == "test":
            del prob_volume
            del depth
        ## For T&T and BlendedMVS dataset, the masks are fused with each level at given conf. to avoid noise pixels.
        ## This part is not implemented here.
        ## Return
        depth_est_list.reverse()  # Reverse the list so that depth_est_list[0] is the largest scale.
        output["depth_est_list"] = depth_est_list
        output["prob_confidence"] = prob_confidence
        return output
| 8,747 | 45.042105 | 119 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/datasets/utils.py
|
# Data io utilities for the dataloader
# by: Jiayu Yang
# date: 2019-07-31
# Note: This file use part of the code from the following projects.
# Thanks for the authors for the great code.
# MVSNet: https://github.com/YoYo000/MVSNet
# MVSNet_pytorch: https://github.com/xy-guo/MVSNet_pytorch
import numpy as np
import re
import sys
from PIL import Image
import os, errno
# For debug:
# import matplotlib.pyplot as plt
# import pdb
def readScanList(scal_list_file, mode, logger):
    """Read a whitespace-separated list of scan names from a text file.

    :param scal_list_file: Path to the scan list text file.
    :param mode: Dataset mode string (e.g. "train"/"test"); used only for logging.
    :param logger: Logger used to report progress.
    :return: List of scan name strings.
    """
    logger.info("Reading scan list...")
    # Context manager guarantees the handle is closed even if read() raises.
    with open(scal_list_file, "r") as scan_list_f:
        scan_list = scan_list_f.read().split()
    logger.info("Done, Using following scans for "+mode+":\n"+str(scan_list))
    return scan_list
def read_pfm(filename):
    """Load an image stored in PFM format.

    :param filename: Path to the .pfm file.
    :return: (data, scale) where data is a float ndarray of shape
             (H, W, 3) for color ('PF') or (H, W) for greyscale ('Pf'),
             and scale is the positive scale factor from the header.
    :raises Exception: If the header is not a valid PFM header.
    """
    # 'with' closes the file even when a malformed header raises;
    # the original open()/close() pair leaked the handle on that path.
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # negative scale encodes little-endian float data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # PFM stores rows bottom-to-top; flip to conventional top-to-bottom order.
    data = np.flipud(data)
    return data, scale
def save_pfm(filename, image, scale=1):
    """Save a float32 image in PFM format, creating parent dirs as needed.

    :param filename: Output path.
    :param image: float32 ndarray, H x W x 3 (color) or H x W / H x W x 1 (greyscale).
    :param scale: Positive scale factor for the header; its sign on disk
                  encodes endianness (negative = little-endian).
    :raises Exception: If dtype is not float32 or the shape is unsupported.
    """
    dirname = os.path.dirname(filename)
    # Guard: os.makedirs('') raises for a bare filename with no directory part.
    if dirname:
        # exist_ok avoids the racy exists()/makedirs() check entirely.
        os.makedirs(dirname, exist_ok=True)

    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')

    # PFM stores rows bottom-to-top.
    image = np.flipud(image)

    with open(filename, "wb") as file:
        file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
        file.write('{} {}\n'.format(image.shape[1], image.shape[0]).encode('utf-8'))
        endian = image.dtype.byteorder
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale
        file.write(('%f\n' % scale).encode('utf-8'))
        image.tofile(file)
def read_cam_file(filename):
    """Parse a camera parameter text file (MVSNet-style layout).

    :param filename: Path to the camera file.
    :return: (intrinsics 3x3, extrinsics 4x4, depth_min, depth_max),
             where depth_max = depth_min + 256 * depth_interval.
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]
    # extrinsics: lines [1, 5), 4x4 matrix.
    # np.fromstring on text is deprecated; parse via split() instead.
    extrinsics = np.array(' '.join(lines[1:5]).split(), dtype=np.float32).reshape((4, 4))
    # intrinsics: lines [7, 10), 3x3 matrix
    intrinsics = np.array(' '.join(lines[7:10]).split(), dtype=np.float32).reshape((3, 3))
    # depth_min & depth_interval: line 11
    depth_min = float(lines[11].split()[0])
    depth_interval = float(lines[11].split()[1])
    # Fixed 256-plane depth range convention.
    depth_max = depth_min + (256 * depth_interval)
    return intrinsics, extrinsics, depth_min, depth_max
def write_cam(filename, intrinsic, extrinsic, depth_min, depth_max):
    """Write camera parameters as text: an 'extrinsic' 4x4 block, an
    'intrinsic' 3x3 block, then a line with the depth range.

    Each matrix row is written space-separated with a trailing space.
    """
    with open(filename, 'w') as f:
        f.write('extrinsic\n')
        for row in range(4):
            f.write(''.join(str(extrinsic[row, col]) + ' ' for col in range(4)))
            f.write('\n')
        f.write('\nintrinsic\n')
        for row in range(3):
            f.write(''.join(str(intrinsic[row, col]) + ' ' for col in range(3)))
            f.write('\n')
        f.write('\n%f %f\n' % (depth_min, depth_max))
def read_img(filename):
    """Load an image as float32 with pixel values scaled from [0, 255] to [0, 1].

    Full-resolution inputs (height 1200) are cropped to 1184 x 1600 —
    presumably so the dimensions divide evenly across CVP pyramid scales
    (see the original 'group' variants below); TODO confirm.
    """
    raw = Image.open(filename)
    img = np.array(raw, dtype=np.float32) / 255.
    # for CVP
    if img.shape[0] == 1200:
        ## normal & group with new costregnet
        img = img[:1184, :1600, :]
        # group variant used: img = img[:1152,:1536,:]
    return img
def write_img(filename, image):
    """Save an image object to *filename*, creating parent dirs as needed.

    :param filename: Output path.
    :param image: Object exposing a .save(path) method (e.g. PIL Image).
    :return: 1 on success (kept for backward compatibility with callers).
    """
    dirname = os.path.dirname(filename)
    # Guard: os.makedirs('') raises for a bare filename with no directory part.
    if dirname:
        # exist_ok avoids the racy exists()/makedirs() check entirely.
        os.makedirs(dirname, exist_ok=True)
    image.save(filename)
    return 1
def read_depth(filename):
    """Load a depth map stored as a PFM file and return it as a float32 array."""
    depth, _scale = read_pfm(filename)
    return np.array(depth, dtype=np.float32)
| 4,744 | 28.47205 | 96 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/datasets/dtu_loader.py
|
# Dataloader for the DTU dataset in Yaoyao's format.
# by: Jiayu Yang
# date: 2020-01-28
# Note: This file use part of the code from the following projects.
# Thanks for the authors for the great code.
# MVSNet: https://github.com/YoYo000/MVSNet
# MVSNet_pytorch: https://github.com/xy-guo/MVSNet_pytorch
from .utils import *
from .dataPaths import *
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
class MVSDataset(Dataset):
    """Dataset for the pre-processed DTU multi-view stereo data (Yao's format).

    Each item provides a reference image, ``args.nsrc`` source images, the
    corresponding camera intrinsics/extrinsics, the depth range, and — in
    train mode — multi-scale ground-truth depth maps for the reference view.
    """

    def __init__(self, args, logger=None):
        """Initialize the dataloader and build the sample meta list.

        :param args: Config namespace; reads dataset_root, mode, nsrc and,
                     in train mode, imgsize and nscale.
        :param logger: Optional logger; a stdout logger is created if None.
        """
        super(MVSDataset, self).__init__()
        # Parse input
        self.args = args
        self.data_root = self.args.dataset_root
        self.scan_list_file = getScanListFile(self.data_root, self.args.mode)
        self.pair_list_file = getPairListFile(self.data_root, self.args.mode)
        self.logger = logger
        if logger is None:
            # BUGFIX: was "import logger", which raised ModuleNotFoundError /
            # left "logging" undefined below. Import the stdlib logging module.
            import logging
            self.logger = logging.getLogger()
            self.logger.setLevel(logging.DEBUG)
            formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
            consoleHandler = logging.StreamHandler(sys.stdout)
            consoleHandler.setFormatter(formatter)
            self.logger.addHandler(consoleHandler)
            self.logger.info("File logger not configured, only writing logs to stdout.")
        self.logger.info("Initiating dataloader for our pre-processed DTU dataset.")
        self.logger.info("Using dataset:"+self.data_root+self.args.mode+"/")
        self.metas = self.build_list(self.args.mode)
        self.logger.info("Dataloader initialized.")

    def build_list(self, mode):
        """Build the item meta list of (scan, ref_view, src_views, light_idx)."""
        metas = []
        # Read scan list
        scan_list = readScanList(self.scan_list_file, self.args.mode, self.logger)
        # Read pairs list (one shared pair file, re-read per scan)
        for scan in scan_list:
            with open(self.pair_list_file) as f:
                num_viewpoint = int(f.readline())
                # viewpoints (49)
                for view_idx in range(num_viewpoint):
                    ref_view = int(f.readline().rstrip())
                    # Pair lines are "count id score id score ..."; take the ids.
                    src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
                    # light conditions 0-6: train uses all, test uses light 3 only
                    if mode == "train":
                        for light_idx in range(7):
                            metas.append((scan, ref_view, src_views, light_idx))
                    elif mode == "test":
                        metas.append((scan, ref_view, src_views, 3))
        self.logger.info("Done. metas:"+str(len(metas)))
        return metas

    def __len__(self):
        return len(self.metas)

    def __getitem__(self, idx):
        """Load one sample (images, cameras, and in train mode depth maps)."""
        meta = self.metas[idx]
        scan, ref_view, src_views, light_idx = meta
        assert self.args.nsrc <= len(src_views)
        self.logger.debug("Getting Item:\nscan:"+str(scan)+"\nref_view:"+str(ref_view)+"\nsrc_view:"+str(src_views)+"\nlight_idx"+str(light_idx))

        ref_img = []
        src_imgs = []
        ref_depths = []
        ref_depth_mask = []
        ref_intrinsics = []
        src_intrinsics = []
        ref_extrinsics = []
        src_extrinsics = []
        depth_min = []
        depth_max = []

        ## 1. Read images
        # ref image
        ref_img_file = getImageFile(self.data_root, self.args.mode, scan, ref_view, light_idx)
        ref_img = read_img(ref_img_file)
        # src image(s)
        for i in range(self.args.nsrc):
            src_img_file = getImageFile(self.data_root, self.args.mode, scan, src_views[i], light_idx)
            src_img = read_img(src_img_file)
            src_imgs.append(src_img)

        ## 2. Read camera parameters
        cam_file = getCameraFile(self.data_root, self.args.mode, ref_view)
        ref_intrinsics, ref_extrinsics, depth_min, depth_max = read_cam_file(cam_file)
        for i in range(self.args.nsrc):
            cam_file = getCameraFile(self.data_root, self.args.mode, src_views[i])
            intrinsics, extrinsics, depth_min_tmp, depth_max_tmp = read_cam_file(cam_file)
            src_intrinsics.append(intrinsics)
            src_extrinsics.append(extrinsics)

        ## 3. Read Depth Maps (ground truth only exists for training)
        if self.args.mode == "train":
            imgsize = self.args.imgsize
            nscale = self.args.nscale
            # Read depth map of same size as input image first.
            depth_file = getDepthFile(self.data_root, self.args.mode, scan, ref_view)
            ref_depth = read_depth(depth_file)
            depth_frame_size = (ref_depth.shape[0], ref_depth.shape[1])
            frame = np.zeros(depth_frame_size)
            frame[:ref_depth.shape[0], :ref_depth.shape[1]] = ref_depth
            ref_depths.append(frame)
            # Downsample the depth for each pyramid scale, padded back into
            # a full-size frame so all scales stack into one array.
            ref_depth = Image.fromarray(ref_depth)
            original_size = np.array(ref_depth.size).astype(int)
            for scale in range(1, nscale):
                new_size = (original_size / (2 ** scale)).astype(int)
                down_depth = ref_depth.resize((new_size), Image.BICUBIC)
                frame = np.zeros(depth_frame_size)
                down_np_depth = np.array(down_depth)
                frame[:down_np_depth.shape[0], :down_np_depth.shape[1]] = down_np_depth
                ref_depths.append(frame)

        # Organize output and return (channels-first image layout)
        sample = {}
        sample["ref_img"] = np.moveaxis(np.array(ref_img), 2, 0)
        sample["src_imgs"] = np.moveaxis(np.array(src_imgs), 3, 1)
        sample["ref_intrinsics"] = np.array(ref_intrinsics)
        sample["src_intrinsics"] = np.array(src_intrinsics)
        sample["ref_extrinsics"] = np.array(ref_extrinsics)
        sample["src_extrinsics"] = np.array(src_extrinsics)
        sample["depth_min"] = depth_min
        sample["depth_max"] = depth_max
        if self.args.mode == "train":
            sample["ref_depths"] = np.array(ref_depths, dtype=float)
            sample["ref_depth_mask"] = np.array(ref_depth_mask)
        elif self.args.mode == "test":
            sample["filename"] = scan + '/{}/' + '{:0>8}'.format(ref_view) + "{}"
        return sample
| 6,228 | 38.176101 | 145 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/datasets/data_io.py
|
import numpy as np
import re
import sys
def read_pfm(filename):
    """Read a PFM file into (data, scale).

    data is float, shape (H, W, 3) for 'PF' headers or (H, W) for 'Pf';
    scale is the positive header scale factor.
    Raises Exception on an invalid or malformed header.
    """
    # Use a context manager so the handle is closed even when a bad
    # header raises (the open()/close() pair leaked it on that path).
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # little-endian
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # Rows are stored bottom-to-top on disk; flip to top-to-bottom.
    data = np.flipud(data)
    return data, scale
def save_pfm(filename, image, scale=1):
    """Write a float32 image to a PFM file.

    :param filename: Output path (parent directory must exist).
    :param image: float32 ndarray, H x W x 3 (color) or H x W / H x W x 1 (greyscale).
    :param scale: Positive header scale; written negative for little-endian data.
    :raises Exception: If dtype is not float32 or the shape is unsupported.
    """
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')

    # PFM stores rows bottom-to-top.
    image = np.flipud(image)

    # 'with' closes the file even if a write fails partway through.
    with open(filename, "wb") as file:
        file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
        file.write('{} {}\n'.format(image.shape[1], image.shape[0]).encode('utf-8'))
        endian = image.dtype.byteorder
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale
        file.write(('%f\n' % scale).encode('utf-8'))
        image.tofile(file)
| 1,867 | 24.944444 | 93 |
py
|
AACVP-MVSNet
|
AACVP-MVSNet-main/datasets/dataPaths.py
|
# Fetching file path and name for dataloader on our DTU dataset.
# by: Jiayu Yang
# date: 2019-08-01
import os
# DTU:
# 2020-01-31 14:20:42: Modified to read original yao's format.
def getScanListFile(data_root, mode):
    """Return the path of the scan list file for *mode*, e.g. '<root>scan_list_train.txt'."""
    return "{}scan_list_{}.txt".format(data_root, mode)
def getPairListFile(data_root, mode):
    """Return the path of the shared view-pair file.

    `mode` is unused (one pair file serves all modes) but kept for a
    uniform signature with the other path helpers.
    """
    return data_root + "Cameras/pair.txt"
def getDepthFile(data_root, mode, scan, view):
    """Return the path of the ground-truth depth map for (scan, view).

    Train scans live under 'Depths/<scan>_train/', others under 'Depths/<scan>/'.
    """
    depth_name = "depth_map_" + str(view).zfill(4) + ".pfm"
    suffix = "_train" if mode == "train" else ""
    scan_path = "Depths/" + scan + suffix + "/"
    return os.path.join(data_root, scan_path, depth_name)
def getImageFile(data_root, mode, scan, view, light):
    """Return the path of the rectified image for (scan, view, light).

    Views are 1-indexed on disk: view 0 maps to 'rect_001_<light>_r5000.png'.
    Train scans live under 'Rectified/<scan>_train/', others under 'Rectified/<scan>/'.
    """
    image_name = "rect_" + str(view + 1).zfill(3) + "_" + str(light) + "_r5000.png"
    suffix = "_train" if mode == "train" else ""
    scan_path = "Rectified/" + scan + suffix + "/"
    return os.path.join(data_root, scan_path, image_name)
def getCameraFile(data_root, mode, view):
    """Return the path of the camera file for *view*.

    Camera files are shared across scans ('Cameras/XXXXXXXX_cam.txt');
    `mode` is unused but kept for a uniform signature.
    """
    cam_name = str(view).zfill(8) + "_cam.txt"
    return os.path.join(data_root, "Cameras/", cam_name)
| 1,243 | 29.341463 | 73 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/setup.py
|
from setuptools import setup, find_packages
# Minimal setup script: registers the 'rocoboom_out' package and every sub-package found under the repo root.
setup(name='rocoboom_out', version='1.0', packages=find_packages())
| 113 | 27.5 | 67 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/__init__.py
| 0 | 0 | 0 |
py
|
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/ss_tools.py
|
import sys
import numpy as np
import numpy.linalg as la
from numpy.linalg import pinv
import cvxpy as cvx
import control
from utility.matrixmath import solveb, mat, vec
def groupdot(A, x):
    """
    Batched matrix-vector product: apply each matrix in A (..., i, k) to the
    matching vector in x (..., k), broadcasting over leading dimensions.
    Suitable for performing many LTI state transitions in a vectorized fashion.
    """
    # Promote x to a column vector, matmul, then drop the singleton axis;
    # equivalent to einsum('...ik,...k', A, x).
    return np.matmul(A, x[..., np.newaxis])[..., 0]
def distance_between_lti(A1, A2):
    # Spectrum distance between two LTI state matrices.
    # Definition following Hsu, Hardt, & Hardt 2019 https://arxiv.org/pdf/1908.01039.pdf
    # eigvals is eig without the (unused) eigenvectors.
    eigs1 = la.eigvals(A1)
    eigs2 = la.eigvals(A2)
    return la.norm(eigs1 - eigs2)
def make_ss(A, B, C, D=None, Q=None, R=None, S=None, dt=1.0,):
    """Build a discrete-time state-space model and attach noise covariances.

    A missing D defaults to the zero feedthrough matrix; missing Q/R/S
    default to zero matrices of the appropriate sizes and are stored as
    extra attributes on the returned control.ss object.
    """
    n = A.shape[0]
    m = B.shape[1]
    p = C.shape[0]
    if D is None:
        D = np.zeros([p, m])
    ss = control.ss(A, B, C, D, dt)
    # Attach (possibly zero) noise covariance data to the model object.
    if Q is None:
        Q = np.zeros([n, n])
    if R is None:
        R = np.zeros([p, p])
    if S is None:
        S = np.zeros([n, p])
    ss.Q = Q
    ss.R = R
    ss.S = S
    return ss
def ss_change_coordinates(model_tgt, model_src, method='match'):
    """Find a similarity transform P mapping source coordinates to target coordinates.

    :param model_tgt: Target state-space model (provides A, B, C, D).
    :param model_src: Source state-space model (provides A, B, C, D and
                      noise covariances Q, R, S), to be re-expressed.
    :param method: 'match' solves a least-squares matching of (A, B, C);
                   'reachable'/'observable'/'modal' use control.canonical_form.
    :return: (model_trans, P) — the transformed source model and the transform.
    :raises ValueError: If method is not one of the recognized options.
    """
    # Find a suitable similarity transform matrix P which transforms coordinates from x (source) to xbar (target)
    # i.e. x = P @ xbar
    A = np.asarray(model_tgt.A)
    B = np.asarray(model_tgt.B)
    C = np.asarray(model_tgt.C)
    D = np.asarray(model_tgt.D)
    Abar = np.asarray(model_src.A)
    Bbar = np.asarray(model_src.B)
    Cbar = np.asarray(model_src.C)
    Dbar = np.asarray(model_src.D)
    Qbar = np.asarray(model_src.Q)
    Rbar = np.asarray(model_src.R)
    Sbar = np.asarray(model_src.S)
    # Get sizes
    n = A.shape[0]
    m = B.shape[1]
    p = C.shape[0]
    if method == 'match':
        # Compute by minimizing the error in statespace matrices A, B, C
        # i.e. minimize over P:
        #   wA*||P Abar - A P||_F^2 + wB*||P Bbar - B||_F^2 + wC*||Cbar - C P||_F^2
        weight_A = 1.0
        weight_B = 1.0
        weight_C = 1.0
        # weight_A = 1.0/A.size
        # weight_B = 1.0/B.size
        # weight_C = 1.0/C.size
        # # Solution using CVX
        # P = cvx.Variable((n, n))
        #
        # # Express squared Frobenius norm using Frobenius norm
        # cost = weight_A*cvx.square(cvx.norm(P@Abar - A@P, 'fro')) \
        #        + weight_B*cvx.square(cvx.norm(P@Bbar - B, 'fro')) \
        #        + weight_C*cvx.square(cvx.norm(Cbar - C@P, 'fro'))
        #
        # # Express squared Frobenius norm directly with sum of squares
        # cost = weight_A*cvx.sum(cvx.square(P@Abar - A@P)) \
        #        + weight_B*cvx.sum(cvx.square(P@Bbar - B)) \
        #        + weight_C*cvx.sum(cvx.square(Cbar - C@P))
        #
        # objective = cvx.Minimize(cost)
        # constraints = []
        # prob = cvx.Problem(objective, constraints)
        # prob.solve()
        # P = P.value
        # TODO investigate whether this Jacobi-type algorithm can be used to solve the problem more quickly
        # --seems only valid for systems with full rank C matrix, so we cannot use here
        # https://ieeexplore.ieee.org/document/6669166
        # Solution in closed form via vectorization, Kronecker products (this is a generalized Lyapunov equation)
        # Setting the gradient of the quadratic objective to zero gives the
        # linear system G vec(P) = vec(H), solved directly below.
        I = np.eye(n)
        G = np.kron(weight_A*np.dot(Abar, Abar.T) + weight_B*np.dot(Bbar, Bbar.T), I) \
            + np.kron(I, weight_A*np.dot(A.T, A) + weight_C*np.dot(C.T, C)) \
            - weight_A*np.kron(Abar.T, A.T) - weight_A*np.kron(Abar, A)
        H = weight_B*np.dot(B, Bbar.T) + weight_C*np.dot(C.T, Cbar)
        vH = vec(H)
        vP = la.solve(G, vH)
        P = mat(vP)
        # # DEBUG
        # # Verify solution is a critical point
        # from autograd import grad
        # import autograd.numpy as anp
        #
        # # Manual expression for gradient
        # def g_manual(x):
        #     P = mat(x)
        #
        #     A_term = 2*anp.dot(P, anp.dot(Abar, Abar.T)) - 2*anp.dot(A, anp.dot(P, Abar.T)) - 2*anp.dot(A.T, anp.dot(P, Abar)) + 2*anp.dot(anp.dot(A.T, A), P)
        #     B_term = 2*anp.dot(P, anp.dot(Bbar, Bbar.T)) - 2*anp.dot(B, Bbar.T)
        #     C_term = 2*anp.dot(anp.dot(C.T, C), P) - 2*anp.dot(C.T, Cbar)
        #     return vec(weight_A*A_term + weight_B*B_term + weight_C*C_term)
        #
        # def myobj(x):
        #     P = mat(x)
        #
        #     A_term = anp.sum(anp.square(anp.dot(P, Abar) - anp.dot(A, P)))
        #     B_term = anp.sum(anp.square(anp.dot(P, Bbar) - B))
        #     C_term = anp.sum(anp.square(Cbar - anp.dot(C, P)))
        #     return weight_A*A_term + weight_B*B_term + weight_C*C_term
        # print(myobj(vec(P)))
        # g_auto = grad(myobj)
        #
        # gval1 = g_auto(vec(P))  # should be zero
        # gval2 = g_manual(vec(P))  # should be zero
        # zzz = 0
    elif method in ['reachable', 'observable', 'modal']:
        # Delegate to python-control's canonical form transforms.
        ss_model_src = make_ss(model_src.A, model_src.B, model_src.C)
        _, P = control.canonical_form(ss_model_src, form=method)
    else:
        raise ValueError('Invalid coordinate transform method!')
    # Apply the transform to all the system matrices
    # (similarity transform for A; covariances transform congruently).
    Ahat = np.dot(P, solveb(Abar, P))
    Bhat = np.dot(P, Bbar)
    Chat = solveb(Cbar, P)
    Dhat = np.copy(Dbar)
    Qhat = np.dot(P, np.dot(Qbar, P.T))
    Rhat = np.copy(Rbar)
    Shat = np.dot(P, Sbar)
    # Create a new ss object from the transformed system matrices
    model_trans = make_ss(Ahat, Bhat, Chat, Dhat, Qhat, Rhat, Shat)
    return model_trans, P
| 5,376 | 31.587879 | 160 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/sim.py
|
import numpy as np
import numpy.random as npr
from rocoboom_out.common.signal_gen import SigParam, make_sig
from rocoboom_out.common.ss_tools import groupdot
def make_exploration(m, Ns, T, u_explore_var):
    """
    Generate exploration control signals for system identification.
    :param m: Number of inputs
    :param Ns: Number of Monte Carlo samples
    :param T: Number of time steps to simulate
    :param u_explore_var: Control input exploration noise variance
    :return: Array of shape (Ns, T, m) of input trajectories.
    """
    scale = np.sqrt(u_explore_var)
    # Superpose generalized binary noise (gbn) and white Gaussian noise (wgn).
    params = [SigParam(method='gbn', mean=0.0, scale=scale),
              SigParam(method='wgn', mean=0.0, scale=scale)]
    return np.array([make_sig(T, m, params) for _ in range(Ns)])
def make_disturbance(n, p, Ns, T, W, V):
    """
    Sample i.i.d. Gaussian process and measurement noise sequences.
    :param n: Number of states
    :param p: Number of outputs
    :param Ns: Number of Monte Carlo samples
    :param T: Number of time steps to simulate
    :param W: State noise covariance matrix
    :param V: Output noise covariance matrix
    :return: (w_hist, v_hist) with shapes (Ns, T, n) and (Ns, T, p).
    """
    zero_state = np.zeros(n)
    zero_output = np.zeros(p)
    w_hist = npr.multivariate_normal(zero_state, W, size=(Ns, T))
    v_hist = npr.multivariate_normal(zero_output, V, size=(Ns, T))
    return w_hist, v_hist
def lsim(A, B, C, D, Ns, T, u_hist, w_hist, v_hist, x0=None, verbose=False):
    """
    Roll out Ns stochastic LTI trajectories in parallel:
        x[t+1] = A x[t] + B u[t] + w[t]
        y[t]   = C x[t] + D u[t] + v[t]
    :param A: State matrix
    :param B: Input matrix
    :param C: Output matrix
    :param D: Direct matrix
    :param Ns: Number of Monte Carlo samples
    :param T: Number of time steps to simulate
    :param u_hist: Control input history
    :param w_hist: Additive process noise history
    :param v_hist: Additive measurement noise history
    :param x0: Initial state (defaults to the origin)
    :param verbose: If True, print progress each step.
    :returns:
        x_hist: State history, shape (Ns, T+1, n)
        y_hist: Measurement output history, shape (Ns, T, p)
    """
    n, m = B.shape
    p, n = C.shape
    # Preallocate state and output histories.
    x_hist = np.zeros([Ns, T+1, n])
    y_hist = np.zeros([Ns, T, p])
    # Initial state (broadcast across all Ns samples).
    x_hist[:, 0] = np.zeros(n) if x0 is None else x0
    for t in range(T):
        # Vectorized state transition and measurement across all samples.
        x_hist[:, t+1] = groupdot(A, x_hist[:, t]) + groupdot(B, u_hist[:, t]) + w_hist[:, t]
        y_hist[:, t] = groupdot(C, x_hist[:, t]) + groupdot(D, u_hist[:, t]) + v_hist[:, t]
        if verbose:
            print('lsim step %d / %d complete' % (t, T))
    return x_hist, y_hist
def make_offline_data(A, B, C, D, W, V, Ns, T, u_var, x0=None, verbose=False):
    """
    Generate multiple state-input-output trajectories e.g. to be used as training data for sysid.
    :param A: State matrix
    :param B: Input matrix
    :param C: Output matrix
    :param D: Direct matrix
    :param W: State noise covariance matrix
    :param V: Output noise covariance matrix
    :param Ns: Number of Monte Carlo samples
    :param T: Number of time steps to simulate
    :param u_var: Control input exploration noise variance
    :param x0: Initial state
    :param verbose: If True, print progress messages.
    :returns:
        x_hist: State history
        u_hist: Control input history
        y_hist: Measurement output history
        w_hist: Additive process noise history
        v_hist: Additive measurement noise history
    """
    if verbose:
        print("Generating offline sample trajectory data... ")
    n, m = B.shape
    p, n = C.shape
    if verbose:
        print("Generating exploration input data... ", end='')
    u_hist = make_exploration(m, Ns, T, u_var)
    if verbose:
        print("...completed!")
    if verbose:
        print("Generating disturbance data... ", end='')
    w_hist, v_hist = make_disturbance(n, p, Ns, T, W, V)
    if verbose:
        print("...completed!")
    if verbose:
        # BUGFIX: this stage message was a copy-paste of the disturbance one;
        # it actually simulates the state/output trajectories.
        print("Simulating state and output trajectories... ", end='')
    x_hist, y_hist = lsim(A, B, C, D, Ns, T, u_hist, w_hist, v_hist, x0, verbose=verbose)
    if verbose:
        print("...completed!")
    if verbose:
        print("...offline sample data completed!")
    return x_hist, u_hist, y_hist, w_hist, v_hist
def lsim_cl(ss, compensator, x0, w_hist, v_hist, T):
    """
    Simulate one closed-loop trajectory of plant `ss` under an
    output-feedback compensator with dynamics
        u[t]      = K xhat[t]
        y[t]      = C x[t] + v[t]
        x[t+1]    = A x[t] + B u[t] + w[t]
        xhat[t+1] = F xhat[t] + L y[t]
    Returns (x_hist, u_hist, y_hist, xhat_hist).
    """
    A, B, C = np.asarray(ss.A), np.asarray(ss.B), np.asarray(ss.C)
    F, K, L = np.asarray(compensator.F), np.asarray(compensator.K), np.asarray(compensator.L)
    n, m, p = A.shape[0], B.shape[1], C.shape[0]

    x_hist = np.zeros([T+1, n])
    xhat_hist = np.zeros([T+1, n])
    u_hist = np.zeros([T, m])
    y_hist = np.zeros([T, p])

    x_hist[0] = x0
    # Do NOT initialize xhat to x0: the compensator state is generally in a
    # different coordinate system than the true plant state.
    xhat_hist[0] = np.zeros_like(x0)

    for t in range(T):
        u_hist[t] = np.dot(K, xhat_hist[t])
        y_hist[t] = np.dot(C, x_hist[t]) + v_hist[t]
        x_hist[t+1] = np.dot(A, x_hist[t]) + np.dot(B, u_hist[t]) + w_hist[t]
        xhat_hist[t+1] = np.dot(F, xhat_hist[t]) + np.dot(L, y_hist[t])
    return x_hist, u_hist, y_hist, xhat_hist
| 5,328 | 30.720238 | 169 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/plotting.py
|
"""
Plotting functions
"""
from copy import copy
from warnings import warn
from time import sleep
import os
import numpy as np
import numpy.linalg as la
from scipy.stats import trim_mean
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import FormatStrFormatter
from matplotlib.cm import get_cmap
from utility.matrixmath import specrad
BIG = 1e100 # A big number to replace np.inf for plotting purposes
def compute_transparencies(quantiles, quantile_fill_alpha):
    # Effective alpha of each stacked quantile band: overlapping translucent
    # fills compose as a_new = a_fill + (1 - a_fill)*a_prev; used so legend
    # patches match the on-plot appearance.
    quantile_alphas = []
    alpha = 0.0
    for _ in quantiles:
        alpha = quantile_fill_alpha + (1 - quantile_fill_alpha) * alpha
        quantile_alphas.append(alpha)
    return quantile_alphas
def multi_plot_paper(output_dict, cost_are_true, t_hist, t_start_estimate, t_evals,
show_print=True, show_plot=True,
plotfun_str='plot', xscale='linear', yscale='symlog',
show_mean=True, show_median=True, show_trimmed_mean=False, show_quantiles=True,
trim_mean_quantile=None, quantile_fill_alpha=0.2, quantile_color='tab:blue',
quantile_region='upper', quantile_style='fill', quantile_legend=True,
stat_diff_type='stat_of_diff',
show_xlabel=True, show_ylabel=True, show_title=True, show_legend=True,
show_grid=True, show_guideline=True, zoom=False,
figsize=(4.5, 3), save_plots=False, dirname_out=None):
# from matplotlib import rc
# rc('text', usetex=True)
control_schemes = list(output_dict.keys())
diff_scheme = control_schemes[0]+' minus '+control_schemes[1]
plot_fields = ['cost_future_hist',
'cost_future_hist',
'cost_future_hist',
'specrad_hist',
'specrad_hist',
'specrad_hist',
'Aerr_hist',
'Berr_hist',
'Cerr_hist',
'a_hist',
'b_hist',
'c_hist',
'gamma_reduction_hist']
plot_control_schemes = ['certainty_equivalent',
'robust',
diff_scheme,
'certainty_equivalent',
'robust',
diff_scheme,
'robust',
'robust',
'robust',
'robust',
'robust',
'robust',
'robust']
ylabels = ['Inf.-horz. perf.',
'Inf.-horz. perf.',
'Inf.-horz. perf. diff.',
'Spec. rad.',
'Spec. rad.',
'Spec. rad. diff.',
r'$\Vert \hat{A}-A \Vert$',
r'$\Vert \hat{B}-B \Vert$',
r'$\Vert \hat{C}-C \Vert$',
r'$a$',
r'$b$',
r'$c$',
r'$c_\gamma$']
filenames = ['cost_future_ce',
'cost_future_rmn',
'cost_future_diff',
'specrad_ce',
'specrad_rmn',
'specrad_diff',
'Aerr',
'Berr',
'Cerr',
'a',
'b',
'c',
'gamma_scale']
quantiles = [1.00, 0.999, 0.99, 0.95, 0.75]
quantiles = np.array(quantiles)
# Set quantile level for trimmed mean
if trim_mean_quantile is None:
trim_mean_quantile = np.max(quantiles[quantiles < 1])
quantile_alphas = compute_transparencies(quantiles, quantile_fill_alpha)
# Process history data for plotting
fields_to_normalize_by_cost_are_true = ['cost_future_hist']
fields_to_mean = []
fields_to_absmax = ['a_hist', 'b_hist', 'c_hist']
fields_to_vecnorm = ['x_train_hist', 'x_test_hist', 'u_train_hist', 'u_test_hist', 'x_opt_test_hist']
fields_to_fronorm = ['K_hist']
fields_to_squeeze = []
fields_to_truncate = ['x_train_hist', 'x_test_hist', 'x_opt_test_hist']
# Make the list of statistic names
statistics = ['ydata', 'mean', 'trimmed_mean', 'median']
for quantile in quantiles:
statistics.append('quantile_'+str(quantile))
statistics.append('quantile_'+str(1-quantile))
# Build the ydata dictionary from output_dict
ydata_dict = {}
for control_scheme in control_schemes:
ydata_dict[control_scheme] = {}
for field in plot_fields:
ydata_dict[control_scheme][field] = {}
# Preprocessing
if field in fields_to_normalize_by_cost_are_true:
ydata = (output_dict[control_scheme][field] / cost_are_true)
elif field in fields_to_mean:
ydata = np.mean(output_dict[control_scheme][field], axis=2)
elif field in fields_to_absmax:
ydata = np.max(np.abs(output_dict[control_scheme][field]), axis=2)
elif field in fields_to_vecnorm:
ydata = la.norm(output_dict[control_scheme][field], axis=2)
elif field in fields_to_fronorm:
ydata = la.norm(output_dict[control_scheme][field], ord='fro', axis=(2, 3))
else:
ydata = output_dict[control_scheme][field]
if field in fields_to_squeeze:
ydata = np.squeeze(ydata)
if field in fields_to_truncate:
ydata = ydata[:, :-1]
# Convert nan to inf
ydata[np.isnan(ydata)] = np.inf
# Convert inf to big number (needed for plotting so that inf values are not neglected)
ydata[np.isinf(ydata)] = BIG
# Store processed data
ydata_dict[control_scheme][field]['ydata'] = ydata
# Compute statistics
ydata_dict[control_scheme][field]['mean'] = np.mean(ydata, axis=0)
ydata_dict[control_scheme][field]['trimmed_mean'] = trim_mean(ydata, proportiontocut=1-trim_mean_quantile, axis=0)
ydata_dict[control_scheme][field]['median'] = np.median(ydata, axis=0)
for quantile in quantiles:
ydata_dict[control_scheme][field]['quantile_'+str(quantile)] = np.quantile(ydata, quantile, axis=0)
ydata_dict[control_scheme][field]['quantile_'+str(1-quantile)] = np.quantile(ydata, 1-quantile, axis=0)
# Compute statistic differences
control_scheme = control_schemes[0] + ' minus ' + control_schemes[1]
control_schemes.append(control_scheme)
ydata_dict[control_scheme] = {}
for field in plot_fields:
ydata_dict[control_scheme][field] = {}
for statistic in statistics:
# Choose whether to calculate statistics before or after taking the difference
if stat_diff_type=='diff_of_stat':
stat1 = ydata_dict[control_schemes[0]][field][statistic]
stat2 = ydata_dict[control_schemes[1]][field][statistic]
ydata_dict[control_scheme][field][statistic] = stat1 - stat2
elif stat_diff_type=='stat_of_diff':
#TODO: reuse statistic computation code above
ydata1 = ydata_dict[control_schemes[0]][field]['ydata']
ydata2 = ydata_dict[control_schemes[1]][field]['ydata']
ydata_diff = ydata1 - ydata2
ydata_dict[control_scheme][field]['ydata'] = ydata_diff
# Compute statistics
ydata_dict[control_scheme][field]['median'] = np.median(ydata_diff, axis=0)
ydata_dict[control_scheme][field]['mean'] = np.mean(ydata_diff, axis=0)
ydata_dict[control_scheme][field]['trimmed_mean'] = trim_mean(ydata_diff, proportiontocut=1-trim_mean_quantile, axis=0)
for quantile in quantiles:
ydata_dict[control_scheme][field]['quantile_'+str(quantile)] = np.quantile(ydata_diff, quantile, axis=0)
ydata_dict[control_scheme][field]['quantile_'+str(1-quantile)] = np.quantile(ydata_diff, 1-quantile, axis=0)
# x start index
x_start_idx = t_start_estimate
for control_scheme, field, ylabel_str, filename in zip(plot_control_schemes, plot_fields, ylabels, filenames):
try:
if show_plot:
# Initialize figure and axes
fig, ax = plt.subplots(figsize=figsize)
# Choose the plotting function
if plotfun_str == 'plot':
plotfun = ax.plot
fbstep = None
elif plotfun_str == 'step':
plotfun = ax.step
fbstep = 'pre'
# Choose the quantiles
if field == 'gamma_reduction_hist':
quantiles = [1.00, 0.95, 0.75]
quantile_regions = ['middle']
else:
quantiles = [0.999, 0.99, 0.95]
quantile_regions = ['upper', 'lower']
quantiles = np.array(quantiles)
legend_handles = []
legend_labels = []
if show_print:
print(control_scheme)
print(field)
t_idxs = []
for t in t_evals:
t_idxs.append(np.where(t_hist == t)[0][0])
xdata = t_hist[t_idxs]
if show_print:
print('Time')
print(xdata)
# Plot mean
if show_mean:
ydata = ydata_dict[control_scheme][field]['mean'][t_idxs]
if show_plot:
artist, = plotfun(xdata, ydata, color='k', lw=3, marker='d', markersize=8, zorder=120)
legend_handles.append(artist)
legend_labels.append("Mean")
if show_print:
print('Mean')
print(ydata)
# Plot trimmed mean
if show_trimmed_mean:
ydata = ydata_dict[control_scheme][field]['trimmed_mean'][t_idxs]
if show_plot:
artist, = plotfun(xdata, ydata, color='tab:grey', lw=3, zorder=130)
legend_handles.append(artist)
legend_labels.append("Trimmed mean, middle %.0f%%" % (100*(1-((1-trim_mean_quantile)*2))))
if show_print:
print('Trimmed mean')
print(ydata)
# Plot median
if show_median:
ydata = ydata_dict[control_scheme][field]['median'][t_idxs]
if show_plot:
artist, = plotfun(xdata, ydata, color='b', lw=3, zorder=110)
legend_handles.append(artist)
legend_labels.append("Median")
if show_print:
print('Median')
print(ydata)
# Plot quantiles
if show_quantiles:
if show_plot:
def plot_quantiles(quantile_region, quantile_color):
qi = 0
my_quantiles = reversed(quantiles) if quantile_region == 'lower' else quantiles
my_quantile_alphas = reversed(quantile_alphas) if quantile_region == 'lower' else quantile_alphas
for quantile, quantile_alpha in zip(my_quantiles, my_quantile_alphas):
if quantile_region == 'upper':
y_lwr = ydata_dict[control_scheme][field]['median'][t_idxs]
else:
y_lwr = ydata_dict[control_scheme][field]['quantile_'+str(1-quantile)][t_idxs]
if quantile_region == 'lower':
y_upr = ydata_dict[control_scheme][field]['median'][t_idxs]
else:
y_upr = ydata_dict[control_scheme][field]['quantile_'+str(quantile)][t_idxs]
ax.fill_between(xdata, y_lwr, y_upr, step=fbstep,
color=quantile_color, alpha=quantile_fill_alpha, zorder=qi)
qi += 1
if quantile_legend:
legend_handles.append(mpatches.Patch(color=quantile_color, alpha=quantile_alpha))
if quantile_region == 'middle':
legend_label_str = "Middle %.1f%%" % (100*(1-((1-quantile)*2)))
elif quantile_region == 'upper':
legend_label_str = "Upper %.1f%%" % (50*(1-((1-quantile)*2)))
elif quantile_region == 'lower':
legend_label_str = "Lower %.1f%%" % (50*(1-((1-quantile)*2)))
legend_labels.append(legend_label_str)
for quantile_region in quantile_regions:
if quantile_region == 'upper' or quantile_region == 'middle':
quantile_color = 'tab:blue'
elif quantile_region == 'lower':
if 'minus' in control_scheme:
quantile_color = 'tab:red'
else:
quantile_color = 'tab:green'
plot_quantiles(quantile_region, quantile_color)
# Print quantiles
if show_print:
for quantile in quantiles:
print('Quantile % .1f%%'%(100*(1 - quantile)))
y_lwr = ydata_dict[control_scheme][field]['quantile_' + str(1 - quantile)][t_idxs]
print(y_lwr)
for quantile in reversed(quantiles):
print('Quantile % .1f%%'%(100*quantile))
y_upr = ydata_dict[control_scheme][field]['quantile_' + str(quantile)][t_idxs]
print(y_upr)
# ax.set_xlim(xl)
# Plot guidelines
if show_plot:
if show_guideline:
y_guide = np.zeros(ydata_dict[control_scheme][field]['ydata'].shape[1])[t_idxs]
if field in ['specrad_hist', 'gamma_reduction_hist', 'cost_future_hist'] and not 'minus' in control_scheme:
y_guide = np.ones(ydata_dict[control_scheme][field]['ydata'].shape[1])[t_idxs]
plotfun(xdata, y_guide, color='tab:grey', lw=2, linestyle='--', zorder=20)
yscale = 'symlog'
if field == 'gamma_reduction_hist':
yscale = 'linear'
elif field in ['a_hist', 'b_hist', 'c_hist', 'Aerr_hist', 'Berr_hist', 'Cerr_hist']:
yscale = 'log'
# Set axes options
ax.set_xscale(xscale)
ax.set_yscale(yscale)
if show_legend:
loc = 'best'
if field == 'regret_hist':
loc = 'center right'
elif field == 'gamma_reduction_hist':
loc = 'lower right'
leg = ax.legend(handles=legend_handles, labels=legend_labels, loc=loc)
leg.set_zorder(1000)
# if field == 'regret_hist' and not control_scheme == diff_scheme:
# yl = [-0.1, 1e27]
# else:
# ydata_lim_lwr = ydata_dict[control_scheme][field]['median'][x_start_idx:]
# ydata_lim_upr = ydata_dict[control_scheme][field]['quantile_'+str(max(quantiles))][x_start_idx:]
# ydata_lim_lwr = ydata_lim_lwr[np.isfinite(ydata_lim_lwr)]
# ydata_lim_upr = ydata_lim_upr[np.isfinite(ydata_lim_upr)]
# yl_lwr = np.min(ydata_lim_lwr)
# yl_upr = np.max(ydata_lim_upr)
# yl = [yl_lwr, yl_upr]
# ax.set_ylim(yl)
# Hardcode axis limits and ticks
if not 'minus' in control_scheme:
if field == 'cost_future_hist':
yl = [0.98, 1.22]
ax.set_ylim(yl)
plt.yticks([1.0, 1.1, 1.2])
# pass
elif field == 'regret_hist':
# plt.locator_params(axis='y', numticks=6)
plt.yticks([0, 1e10, 1e20, 1e30, 1e40, 1e50, 1e60, 1e70, 1e80])
# pass
elif field == 'specrad_hist':
plt.yticks([0.0, 0.5, 1.0, 1.5])
# pass
elif field in ['a_hist', 'b_hist', 'c_hist']:
# plt.yticks([0, 0.5, 1, 1.5, 2])
pass
elif field == 'gamma_reduction_hist':
plt.yticks([0, 0.25, 0.5, 0.75, 1])
else:
if field == 'cost_future_hist':
yl = [-0.02, 0.02]
ax.set_ylim(yl)
plt.yticks([-0.02, -0.01, 0.0, 0.01, 0.02])
elif field == 'specrad_hist':
yl = [-0.6, 0.6]
ax.set_ylim(yl)
plt.yticks([-0.6, -0.3, 0.0, 0.3, 0.6])
# xl = [t_hist[x_start_idx], t_hist[-1]*1.25]
# ax.set_xlim(xl)
# if field == 'a_hist' or field == 'b_hist' or field == 'gamma_reduction_hist':
# xl = [t_hist[x_start_idx], 20]
# Plot options
if show_grid:
ax.grid('on')
ax.set_axisbelow(True)
if show_xlabel:
xlabel_str = 'Time'
ax.set_xlabel(xlabel_str, fontsize=12)
if show_ylabel:
# rot = None
# ax.set_ylabel(ylabel_str, fontsize=12, rotation=rot)
ax.set_ylabel(ylabel_str, fontsize=12)
if show_title:
# title_str = ylabel_str + '_' + control_scheme
title_str = control_scheme
title_str = title_str.replace('_', ' ').title()
ax.set_title(title_str)
fig.tight_layout()
if save_plots:
filename_out = 'plot_' + filename + '.png'
path_out = os.path.join(dirname_out, filename_out)
plt.savefig(path_out, dpi=600, bbox_inches="tight")
if show_print:
print()
except:
pass
if show_plot:
plt.show()
| 19,174 | 43.182028 | 135 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/uncertainty.py
|
from time import time
import numpy as np
import numpy.linalg as la
import numpy.random as npr
from utility.matrixmath import mdot
from utility.printing import create_tag
from rocoboom_out.common.sysid import system_identification
from rocoboom_out.common.ss_tools import ss_change_coordinates, groupdot
class Uncertainty:
    """
    Container for a multiplicative-noise description of model uncertainty.

    Attributes a, b, c hold the noise variances associated with the A, B, C
    matrices; Aa, Bb, Cc hold the corresponding noise direction matrices.
    """
    def __init__(self, a=None, Aa=None, b=None, Bb=None, c=None, Cc=None):
        self.a, self.Aa = a, Aa
        self.b, self.Bb = b, Bb
        self.c, self.Cc = c, Cc
def block_bootstrap(u_hist, y_hist, t=None, Nb=None, blocksize=40):
    """
    Generate bootstrap resamples of input/output data with the moving block bootstrap.

    Blocks of `blocksize` consecutive samples are drawn iid with replacement from
    the original data and concatenated until each length-t buffer is filled
    (the final block is truncated if t is not a multiple of blocksize).

    :param u_hist: input history, array of shape (T, m)
    :param y_hist: output history, array of shape (T, p)
    :param t: int, time up to which to use the available data (default: full length)
    :param Nb: int, number of bootstrap samples
    :param blocksize: int, number of consecutive samples per block
    :returns: (u_boot_hist, y_boot_hist) of shapes (Nb, t, m) and (Nb, t, p)
    :raises ValueError: if blocksize exceeds the data length
    """
    # Get sizes
    p = y_hist.shape[1]
    m = u_hist.shape[1]
    # Get time horizon
    if t is None:
        t = u_hist.shape[0]
    if blocksize > t:
        raise ValueError('Blocksize exceeds data length, reduce blocksize!')
    # Preallocate bootstrap sample arrays
    u_boot_hist = np.zeros([Nb, t, m])
    y_boot_hist = np.zeros([Nb, t, p])
    for i in range(Nb):
        # Sample blocks of i/o data iid with replacement until the buffer is filled out.
        # Bugfixes vs the original loop:
        #  - loop on `start` so the case blocksize == t still fills the buffer
        #    (the old `while end < t` never ran then, silently returning zeros);
        #  - truncate the sampled block to `end - start` rows so the final partial
        #    block assigns cleanly when t is not a multiple of blocksize;
        #  - draw the block start from all valid positions 0 ... t-blocksize
        #    (randint's high bound is exclusive, so +1 is needed).
        start = 0
        while start < t:
            end = min(start + blocksize, t)
            idx = npr.randint(t - blocksize + 1) + np.arange(end - start)
            u_boot_hist[i, start:end] = u_hist[idx]
            y_boot_hist[i, start:end] = y_hist[idx]
            start = end
    return u_boot_hist, y_boot_hist
def semiparametric_bootstrap(model, u_hist, y_hist, w_hist, v_hist, t=None, Nb=None):
    """
    Generate bootstrap datasets by re-simulating the nominal model with
    residuals resampled iid with replacement (semiparametric bootstrap).

    :param model: object, nominal model with attributes A matrix, B matrix, C matrix
    :param u_hist: recorded input history, shape (T, m)
    :param y_hist: recorded output history (only used for interface symmetry)
    :param w_hist: process noise residual history
    :param v_hist: output noise residual history
    :param t: int, time up to which to use the available data
    :param Nb: int, number of bootstrap samples
    :returns: (u_boot_hist, y_boot_hist) of shapes (Nb, t, m) and (Nb, t, p)
    """
    A, B, C = model.A, model.B, model.C
    # Dimensions of state, input, output
    n, m, p = A.shape[0], B.shape[1], C.shape[0]
    if t is None:
        t = u_hist.shape[0]
    # Buffers for the resampled trajectories
    x_boot_hist = np.zeros([Nb, t+1, n])
    u_boot_hist = np.zeros([Nb, t, m])
    y_boot_hist = np.zeros([Nb, t, p])
    w_boot_hist = np.zeros([Nb, t, n])
    v_boot_hist = np.zeros([Nb, t, p])
    for i in range(Nb):
        x_boot_hist[i, 0] = np.zeros(n)  # each rollout starts at the origin
        u_boot_hist[i] = u_hist[0:t]     # reuse the recorded input sequence
        # Resample residuals iid with replacement (same time indices for w and v)
        idx = npr.randint(w_hist.shape[0], size=t)
        w_boot_hist[i] = w_hist[idx]
        v_boot_hist[i] = v_hist[idx]
    # Roll all Nb trajectories forward in time simultaneously
    for t_samp in range(t):
        x_boot_hist[:, t_samp+1] = (groupdot(A, x_boot_hist[:, t_samp])
                                    + groupdot(B, u_boot_hist[:, t_samp])
                                    + w_boot_hist[:, t_samp])
        y_boot_hist[:, t_samp] = (groupdot(C, x_boot_hist[:, t_samp])
                                  + v_boot_hist[:, t_samp])
    return u_boot_hist, y_boot_hist
def check_diff(model1, model2):
    """Print the A, B, C matrices of two models pairwise for visual comparison."""
    pairs = ((model1.A, model2.A),
             (model1.B, model2.B),
             (model1.C, model2.C))
    for first, second in pairs:
        print(first)
        print(second)
        print('')
def ensemble2multnoise(model, u_boot_hist, y_boot_hist, return_models=False, verbose=False):
    """Convert an ensemble of data histories to a multiplicative noise representation.

    Fits one state-space model per bootstrap dataset, aligns each to the nominal
    model's coordinates, then eigendecomposes the sample covariance of the
    vectorized A/B/C estimates into variances (a, b, c) and direction matrices
    (Aa, Bb, Cc).

    :param model: nominal model with attributes A, B, C.
    :param u_boot_hist: bootstrap input histories, shape (Nb, t, m).
    :param y_boot_hist: bootstrap output histories, shape (Nb, t, p).
    :param return_models: bool, also return the aligned bootstrap models.
    :param verbose: bool, print progress after each bootstrap model is built.
    :returns: dict with key 'uncertainty' (an Uncertainty instance) and,
        if return_models, key 'models_boot' (list of aligned models).
    """
    A = model.A
    B = model.B
    C = model.C
    # Get sizes
    n = A.shape[0]
    m = B.shape[1]
    p = C.shape[0]
    Nb = u_boot_hist.shape[0]
    # Form bootstrap model estimates
    Ahat_boot = np.zeros([Nb, n, n])
    Bhat_boot = np.zeros([Nb, n, m])
    Chat_boot = np.zeros([Nb, p, n])
    if return_models:
        models_boot = []
    for i in range(Nb):
        # Fit a model of the same fixed order to the i-th bootstrap dataset
        model_boot = system_identification(y_boot_hist[i], u_boot_hist[i], SS_fixed_order=n)
        # check_diff(model, model_boot)
        # Transform to align coordinate systems as much as possible
        model_trans, P = ss_change_coordinates(model, model_boot, method='match')
        if return_models:
            models_boot.append(model_trans)
        Ahat_boot[i] = model_trans.A
        Bhat_boot[i] = model_trans.B
        Chat_boot[i] = model_trans.C
        if verbose:
            print('created bootstrap model %6d / %6d' % (i+1, Nb))
    # Sample variance of bootstrap estimates
    # order='F': column-major (Fortran) vectorization of each estimate
    Ahat_boot_reshaped = np.reshape(Ahat_boot, [Nb, n*n], order='F')
    Bhat_boot_reshaped = np.reshape(Bhat_boot, [Nb, n*m], order='F')
    Chat_boot_reshaped = np.reshape(Chat_boot, [Nb, p*n], order='F')
    Ahat_boot_mean_reshaped = np.mean(Ahat_boot_reshaped, axis=0)
    Bhat_boot_mean_reshaped = np.mean(Bhat_boot_reshaped, axis=0)
    Chat_boot_mean_reshaped = np.mean(Chat_boot_reshaped, axis=0)
    # TODO account for correlation between A, B, C
    # can we do it easily with a decorrelation scheme /coordinate change?
    # or do we need to re-derive the full gdare w/ terms?
    # Center the vectorized estimates about their ensemble mean
    Abar = Ahat_boot_reshaped - Ahat_boot_mean_reshaped
    Bbar = Bhat_boot_reshaped - Bhat_boot_mean_reshaped
    Cbar = Chat_boot_reshaped - Chat_boot_mean_reshaped
    # Unbiased sample covariances of the vectorized estimates (divide by Nb-1)
    SigmaA = np.dot(Abar.T, Abar)/(Nb-1)
    SigmaB = np.dot(Bbar.T, Bbar)/(Nb-1)
    SigmaC = np.dot(Cbar.T, Cbar)/(Nb-1)
    # Eigendecompose each covariance: eigenvalues become noise variances,
    # eigenvectors (reshaped) become noise direction matrices
    SigmaAeigvals, SigmaAeigvecs = la.eigh(SigmaA)
    SigmaBeigvals, SigmaBeigvecs = la.eigh(SigmaB)
    SigmaCeigvals, SigmaCeigvecs = la.eigh(SigmaC)
    a = np.real(SigmaAeigvals)
    b = np.real(SigmaBeigvals)
    c = np.real(SigmaCeigvals)
    Aa = np.reshape(SigmaAeigvecs, [n*n, n, n], order='F')  # These uncertainty directions have unit Frobenius norm
    Bb = np.reshape(SigmaBeigvecs, [n*m, n, m], order='F')  # These uncertainty directions have unit Frobenius norm
    Cc = np.reshape(SigmaCeigvecs, [p*n, p, n], order='F')  # These uncertainty directions have unit Frobenius norm
    out_dict = {'uncertainty': Uncertainty(a, Aa, b, Bb, c, Cc)}
    if return_models:
        out_dict['models_boot'] = models_boot
    return out_dict
def estimate_model_uncertainty(model, u_hist, y_hist, w_est, v_est, t, Nb,
                               uncertainty_estimator=None, return_models=False, log_diagnostics=False):
    """
    Estimate multiplicative-noise uncertainty for a nominal model from data.

    :param model: nominal identified model with attributes A, B, C.
    :param u_hist: recorded input history.
    :param y_hist: recorded output history.
    :param w_est: estimated process noise residuals.
    :param v_est: estimated output noise residuals.
    :param t: int, time up to which to use the available data.
    :param Nb: int, number of bootstrap samples.
    :param uncertainty_estimator: None, 'exact' (not implemented),
        'block_bootstrap', or 'semiparametric_bootstrap'.
    :param return_models: bool, include the bootstrap models in the result.
    :param log_diagnostics: bool, include timing tags in the result.
    :returns: dict with key 'uncertainty' (and optionally 'models_boot', 'tag_list').
    :raises ValueError: if uncertainty_estimator is not a recognized method.
    """
    if log_diagnostics:
        time_start = time()
        tag_list = []
    if uncertainty_estimator is None:
        out_dict = {'uncertainty': None}
    elif uncertainty_estimator == 'exact':
        # TODO "cheat" by using the true model error as the multiplicative noise
        # (single direction per matrix: the normalized error, variance = squared error norm)
        raise NotImplementedError
    else:
        # Build bootstrap datasets from the recorded data
        if uncertainty_estimator == 'block_bootstrap':
            u_boot_hist, y_boot_hist = block_bootstrap(u_hist, y_hist, t, Nb)
        elif uncertainty_estimator == 'semiparametric_bootstrap':
            u_boot_hist, y_boot_hist = semiparametric_bootstrap(model, u_hist, y_hist, w_est, v_est, t, Nb)
        else:
            raise ValueError('Invalid uncertainty estimator method!')
        # TODO investigate why w_est and v_est are sometimes much smaller in magnitude
        # than the true noise histories (compare model.Q vs W and model.R vs V).
        # Fit a model per bootstrap dataset and convert the ensemble spread
        # into multiplicative noise variances and directions
        out_dict = ensemble2multnoise(model, u_boot_hist, y_boot_hist, return_models=return_models)
    if log_diagnostics:
        time_end = time()
        time_elapsed = time_end - time_start
        tag_list.append(create_tag("time to make uncertainty: %f" % time_elapsed))
        out_dict['tag_list'] = tag_list
    return out_dict
| 9,291 | 34.601533 | 128 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/problem_data_gen.py
|
"""
Problem data generation.
Generic outputs are:
:param n: Number of states, integer
:param m: Number of inputs, integer
:param p: Number of outputs, integer
:param A: System state matrix, n x n matrix
:param B: System control input matrix, n x m matrix
:param C: System output matrix, p x n matrix
:param Q: State-dependent quadratic cost, n x n matrix
:param R: Control-dependent quadratic cost, m x m matrix
:param W: Additive process noise covariance, n x n matrix
:param V: Additive output noise covariance, p x p matrix
"""
import numpy as np
import numpy.random as npr
import numpy.linalg as la
import scipy.linalg as sla
from utility.matrixmath import specrad, mdot
from utility.pickle_io import pickle_import, pickle_export
def gen_rand_system(n=4, m=3, p=2, spectral_radius=0.9, noise_scale=0.0001, seed=None):
    """
    Generate a random linear system with quadratic costs and additive noise.

    :param n: Number of states, integer
    :param m: Number of inputs, integer
    :param p: Number of outputs, integer
    :param spectral_radius: Open-loop spectral radius to which A is rescaled, float
    :param noise_scale: Scaling of the additive noise covariances, float
    :param seed: Seed for the global NumPy random number generator, positive integer
    :returns: (n, m, p, A, B, C, D, Y, Q, R, W, V, U)
    """
    npr.seed(seed)
    # Random state matrix, rescaled to the requested open-loop spectral radius
    A = npr.randn(n, n)
    A *= spectral_radius/specrad(A)
    B = npr.randn(n, m)
    C = npr.randn(p, n)
    D = np.zeros([p, m])
    # Penalize outputs; the induced state penalty is C' Y C
    Y = np.eye(p)
    Q = np.dot(C.T, np.dot(Y, C))
    R = np.eye(m)  # Control penalty
    # Additive noise covariances (noise covariances are estimated later during
    # system identification, so simple scaled identities suffice here)
    W = noise_scale*np.eye(n)  # State noise covariance
    V = noise_scale*np.eye(p)  # Output noise covariance
    U = np.zeros([n, p])       # State-output noise cross-covariance
    return n, m, p, A, B, C, D, Y, Q, R, W, V, U
def gen_scalar_system(A=1.0, B=1.0, C=1.0, D=0.0, Y=1.0, R=1.0, W=1.0, V=1.0, U=0.0):
    """
    Package scalar problem data as 1x1 matrices.

    :returns: (n, m, p, A, B, C, D, Y, Q, R, W, V, U) with all matrices 2-D.
    """
    n = m = p = 1
    A, B, C, D, Y, R, W, V, U = (np.atleast_2d(val) for val in (A, B, C, D, Y, R, W, V, U))
    # Scalar case: the state penalty C' Y C reduces to an elementwise product
    Q = C*Y*C
    return n, m, p, A, B, C, D, Y, Q, R, W, V, U
def gen_pendulum_system(inverted, mass=10, damp=2, dt=0.1, Y=None, R=None, W=None, V=None, U=None):
    """
    Pendulum (optionally inverted) discretized with forward Euler.

    States: x[0] = angular position, x[1] = angular velocity.
    Single torque input; single measured output (angular position).

    :param inverted: bool, True for the inverted (unstable) pendulum.
    :param mass: mass parameter in the linearized dynamics.
    :param damp: damping coefficient.
    :param dt: discretization time step.
    :param Y, R, W, V, U: optional penalty / noise covariance overrides;
        defaults are used where None.
    :returns: (n, m, p, A, B, C, D, Y, Q, R, W, V, U)
    """
    n, m, p = 2, 1, 1
    # Sign of the gravity term flips between inverted and hanging configurations
    sign = 1 if inverted else -1
    A = np.array([[1.0, dt],
                  [sign*mass*dt, 1.0-damp*dt]])
    B = np.array([[0],
                  [dt]])
    C = np.array([[1.0, 0.0]])
    D = np.array([[0.0]])
    # Fill in defaults for penalties and noise covariances
    if Y is None:
        Y = np.eye(p)
    Q = np.dot(C.T, np.dot(Y, C))
    if R is None:
        R = np.eye(m)
    if W is None:
        W = 0.001*np.diag([0.0, 1.0])
    if V is None:
        V = 0.001*np.diag([0.1])
    if U is None:
        U = np.zeros([n, p])
    return n, m, p, A, B, C, D, Y, Q, R, W, V, U
def gen_example_system(idx, noise_scale=1.0):
    """
    Hard-coded example systems from the literature.

    :param idx: Selection integer to pick the example system.
        1: 2-state shift register from https://www.argmin.net/2020/07/27/discrete-fragility/
           (possibly interesting lack of robustness under CE control).
        2: 3-state system from https://arxiv.org/pdf/1710.01688.pdf
           (possibly interesting lack of robustness under CE control).
    :param noise_scale: multiplier applied to the noise covariances W, V, U.
    :returns: (n, m, p, A, B, C, D, Y, Q, R, W, V, U)
    :raises Exception: if idx is not a known example index.
    """
    if idx == 1:
        n, m, p = 2, 1, 1
        A = np.array([[0.0, 1.0],
                      [0.0, 0.0]])
        B = np.array([[0.0],
                      [1.0]])
        C = np.array([[1.0, -1.0]])
        D = np.array([[0.0]])
    elif idx == 2:
        n, m, p = 3, 3, 3
        A = np.array([[1.01, 0.01, 0.00],
                      [0.01, 1.01, 0.01],
                      [0.00, 0.01, 1.01]])
        B = np.eye(3)
        C = np.eye(3)
        D = np.zeros([3, 3])
    else:
        raise Exception('Invalid system index chosen, please choose a different one')
    # Penalties and noise covariances are identical across the examples,
    # so they are constructed once after the branch.
    Y = np.eye(p)
    Q = np.dot(C.T, np.dot(Y, C))
    R = 0.01*np.eye(m)
    W = 0.1*np.eye(n)*noise_scale
    V = 0.1*np.eye(p)*noise_scale
    U = np.zeros([n, p])*noise_scale
    return n, m, p, A, B, C, D, Y, Q, R, W, V, U
def gen_system_omni(system_idx, **kwargs):
    """
    Dispatch wrapper for the system generation functions.

    :param system_idx: 'inverted_pendulum', 'noninverted_pendulum', 'scalar',
        'rand', or an integer index into the hard-coded examples.
    :param kwargs: forwarded unchanged to the selected generator.
    :returns: whatever the selected generator returns.
    """
    if system_idx in ('inverted_pendulum', 'noninverted_pendulum'):
        return gen_pendulum_system(inverted=(system_idx == 'inverted_pendulum'), **kwargs)
    if system_idx == 'scalar':
        return gen_scalar_system(**kwargs)
    if system_idx == 'rand':
        return gen_rand_system(**kwargs)
    # Anything else is treated as an example-system index
    return gen_example_system(idx=system_idx, **kwargs)
def save_system(n, m, p, A, B, C, D, Y, Q, R, W, V, U, dirname_out, filename_out):
    """Pickle all problem data to dirname_out/filename_out as a name->value dict."""
    values = [n, m, p, A, B, C, D, Y, Q, R, W, V, U]
    names = ['n', 'm', 'p', 'A', 'B', 'C', 'D', 'Y', 'Q', 'R', 'W', 'V', 'U']
    system_data = dict(zip(names, values))
    pickle_export(dirname_out, filename_out, system_data)
def load_system(filename_in):
    """Load pickled problem data, returning the values in canonical order."""
    system_data = pickle_import(filename_in)
    keys = ['n', 'm', 'p', 'A', 'B', 'C', 'D', 'Y', 'Q', 'R', 'W', 'V', 'U']
    return [system_data[key] for key in keys]
| 5,967 | 31.434783 | 115 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/gdare.py
|
# Combined estimator and controller design for multiplicative noise LQG
import numpy as np
import numpy.linalg as la
import numpy.random as npr
import matplotlib.pyplot as plt
from utility.matrixmath import specrad, vec, mat
def mat_diff(X, Y):
    """Spectral-norm distance between corresponding matrices of two stacks.

    :param X, Y: arrays of shape (k, d, d).
    :returns: array of k spectral norms of the differences.
    """
    D = X - Y
    return la.norm(D, ord=2, axis=(1, 2))
def rand_psd(d, s=None):
    """
    Generate a random d x d positive semidefinite matrix.

    Built as V E V' with a random nonnegative diagonal E and a random
    orthogonal V (Q factor of a Gaussian matrix); optionally rescaled so the
    spectral radius equals s.
    """
    eigvals = np.diag(npr.rand(d))
    basis = la.qr(npr.randn(d, d))[0]
    P = basis.dot(eigvals.dot(basis.T))
    if s is not None:
        P *= s/specrad(P)
    return P
# TODO move to problem_data_gen
def make_noise_data(Sigma, d1, d2):
    """
    Decompose a covariance Sigma (of size d1*d2 x d1*d2) into eigenvalue
    weights and d1 x d2 direction matrices (eigenvector columns reshaped).

    :returns: (a, Aa) with a the eigenvalues and Aa of shape (d1*d2, d1, d2).
    """
    d12 = d1*d2
    a, eigvecs = la.eig(Sigma)
    Aa = np.zeros([d12, d1, d2])
    for idx in range(d12):
        Aa[idx] = eigvecs[:, idx].reshape(d1, d2)
    return a, Aa
# TODO move to problem_data_gen
def make_system_data(n=4, m=3, p=2, r=0.99, s=1.0):
    """
    Generate random multiplicative-noise LQG problem data.

    :param n: number of states.
    :param m: number of inputs.
    :param p: number of outputs.
    :param r: open-loop spectral radius to which A is rescaled.
    :param s: scale of the multiplicative noise covariances.
    :returns: dict of problem data consumed by unpack()/qfun()/etc.
    """
    # Nominal system matrices; A is rescaled to spectral radius r
    A = npr.randn(n, n)
    A *= r/specrad(A)
    B = npr.randn(n, m)
    C = npr.randn(p, n)
    # Multiplicative noise covariances (state, input, output) and their
    # eigendecompositions into variances + direction matrices
    SigmaA = rand_psd(n*n, s=0.001*s)
    SigmaB = rand_psd(n*m, s=0.001*s)
    SigmaC = rand_psd(n*p, s=0.001*s)
    a, Aa = make_noise_data(SigmaA, n, n)
    b, Bb = make_noise_data(SigmaB, n, m)
    c, Cc = make_noise_data(SigmaC, p, n)
    # Quadratic penalties
    Q = 1.0*np.eye(n)  # state cost
    R = 1.0*np.eye(m)  # action cost
    # Additive noise covariances
    W = 0.1*np.eye(n)      # process noise
    V = 0.1*np.eye(p)      # sensor noise
    U = np.zeros([n, p])   # process-sensor cross-covariance
    return dict(A=A, B=B, C=C, a=a, Aa=Aa, b=b, Bb=Bb, c=c, Cc=Cc,
                Q=Q, R=R, W=W, V=V, U=U, n=n, m=m, p=p)
# TODO move to problem_data_gen
def example_sysdata(beta=0.1):
    """
    Fixed example from W.L. de Koning (1992) with one multiplicative noise
    direction per matrix (the nominal matrix itself), each with variance beta.

    beta = 0.0: deterministic case; beta = 0.2: ms-compensatable;
    beta = 0.3: not ms-compensatable.

    :returns: dict of problem data consumed by unpack()/qfun()/etc.
    """
    n, m, p = 2, 1, 1
    A = np.array([[0.7092, 0.3017],
                  [0.1814, 0.9525]])
    B = np.array([[0.7001],
                  [0.1593]])
    C = np.array([[0.3088, 0.5735]])
    # Single noise direction per matrix, weighted by beta
    a = np.copy(beta)[None]
    Aa = np.copy(A)[None]
    b = np.copy(beta)[None]
    Bb = np.copy(B)[None]
    c = np.copy(beta)[None]
    Cc = np.copy(C)[None]
    # Penalties and additive noise covariances
    Q = np.diag([0.7350, 0.9820])
    R = 0.6644*np.eye(m)
    W = np.diag([0.5627, 0.7357])
    V = 0.2588*np.eye(p)
    U = np.zeros([n, p])
    return dict(A=A, B=B, C=C, a=a, Aa=Aa, b=b, Bb=Bb, c=c, Cc=Cc,
                Q=Q, R=R, W=W, V=V, U=U, n=n, m=m, p=p)
def unpack(sysdata):
    """Return the problem data as numpy arrays in canonical order."""
    keys = ('A', 'B', 'C', 'a', 'Aa', 'b', 'Bb', 'c', 'Cc',
            'Q', 'R', 'W', 'V', 'U', 'n', 'm', 'p')
    return [np.asarray(sysdata[k]) for k in keys]
def qfun(X, sysdata):
    """
    Evaluate the control (G) and estimator (H) Q-function block matrices at the
    value matrices X = (X1, X2, X3, X4).

    The einsum pattern 'x,xji,jk,xkl->il' computes sum_x w[x] * M[x].T @ P @ M[x]
    (and 'x,xij,jk,xlk->il' the transposed-sandwich sum_x w[x] * M[x] @ P @ M[x].T),
    i.e. the multiplicative-noise correction terms.

    :param X: sequence of four n x n value matrices.
    :param sysdata: dict of problem data (see make_system_data / example_sysdata).
    :returns: (G, H) with G of shape (n+m, n+m) and H of shape (n+p, n+p).
    """
    A, B, C, a, Aa, b, Bb, c, Cc, Q, R, W, V, U, n, m, p = unpack(sysdata)
    X1, X2, X3, X4 = [X[i] for i in range(4)]
    # Control Q-function (G)
    Gxu = np.dot(A.T, np.dot(X1, B))
    Gux = Gxu.T  # = np.dot(B.T, np.dot(X1, A))
    Guu = R + np.dot(B.T, np.dot(X1, B)) \
          + np.einsum('x,xji,jk,xkl->il', b, Bb, X1, Bb) \
          + np.einsum('x,xji,jk,xkl->il', b, Bb, X2, Bb)
    # Estimator Q-function (H)
    # Hxy = np.dot(A, np.dot(X3, C.T))  # known correct if U == 0
    Hxy = U + np.dot(A, np.dot(X3, C.T))  # use for U != 0
    Hyx = Hxy.T  # = np.dot(C, np.dot(X3, A.T))
    Hyy = V + np.dot(C, np.dot(X3, C.T)) \
          + np.einsum('x,xij,jk,xlk->il', c, Cc, X3, Cc) \
          + np.einsum('x,xij,jk,xlk->il', c, Cc, X4, Cc)
    # Compute gains for use in computing the Gxx, Hxx blocks
    K = -la.solve(Guu, Gux)  # Control gain u = K*x
    L = la.solve(Hyy, Hyx).T  # Estimator gain xhat = A*x + B*u + L*(y - C*xhat)
    LX2L = np.dot(L.T, np.dot(X2, L))
    KX4K = np.dot(K, np.dot(X4, K.T))
    Gxx = Q + np.dot(A.T, np.dot(X1, A)) \
          + np.einsum('x,xji,jk,xkl->il', a, Aa, X1, Aa) \
          + np.einsum('x,xji,jk,xkl->il', a, Aa, X2, Aa) \
          + np.einsum('x,xji,jk,xkl->il', c, Cc, LX2L, Cc)
    Hxx = W + np.dot(A, np.dot(X3, A.T)) \
          + np.einsum('x,xij,jk,xlk->il', a, Aa, X3, Aa) \
          + np.einsum('x,xij,jk,xlk->il', a, Aa, X4, Aa) \
          + np.einsum('x,xij,jk,xlk->il', b, Bb, KX4K, Bb)
    # Put the blocks together
    G = np.block([[Gxx, Gxu],
                  [Gux, Guu]])
    H = np.block([[Hxx, Hxy],
                  [Hyx, Hyy]])
    return G, H
def gain(X, sysdata, return_qfun=False):
    """
    Compute control and estimator gains from the value matrices X.

    :param X: sequence of four n x n value matrices.
    :param sysdata: dict of problem data.
    :param return_qfun: bool, also return the Q-function matrices (G, H).
    :returns: (K, L) or (K, L, G, H).
    """
    n, m, p = [sysdata[key] for key in ['n', 'm', 'p']]
    # Evaluate the Q-functions and slice out the blocks needed for the gains
    G, H = qfun(X, sysdata)
    Gux, Guu = G[n:n+m, 0:n], G[n:n+m, n:n+m]
    Hyx, Hyy = H[n:n+p, 0:n], H[n:n+p, n:n+p]
    K = -la.solve(Guu, Gux)   # Control gain u = K*x
    L = la.solve(Hyy, Hyx).T  # Estimator gain xhat = A*x + B*u + L*(y - C*xhat)
    if return_qfun:
        return K, L, G, H
    return K, L
def ricc(X, sysdata):
    """
    Riccati operator for multiplicative noise LQG: one application of the
    coupled design equations to the stacked value matrices X = (X1, X2, X3, X4).
    See W.L. de Koning, TAC 1992 https://ieeexplore.ieee.org/document/135491

    :param X: array of shape (4, n, n), current value matrices.
    :param sysdata: dict of problem data.
    :returns: array of shape (4, n, n), updated value matrices.
    """
    A, B, C, a, Aa, b, Bb, c, Cc, Q, R, W, V, U, n, m, p = unpack(sysdata)
    # Get gain and Q function
    K, L, G, H = gain(X, sysdata, return_qfun=True)
    Gxx = G[0:n, 0:n]
    Gxu = G[0:n, n:n+m]
    Gux = G[n:n+m, 0:n]
    Guu = G[n:n+m, n:n+m]
    Hxx = H[0:n, 0:n]
    Hxy = H[0:n, n:n+p]
    Hyx = H[n:n+p, 0:n]
    Hyy = H[n:n+p, n:n+p]
    # Closed-loop system matrices
    ABK = A + np.dot(B, K)
    ALC = A - np.dot(L, C)
    # Form the RHS
    Z1 = np.dot(Gxu, la.solve(Guu, Gux))  # Schur-complement term Gxu Guu^-1 Gux
    Z3 = np.dot(Hxy, la.solve(Hyy, Hyx))  # Schur-complement term Hxy Hyy^-1 Hyx
    Y1 = Gxx - Z1
    Y2 = np.dot(ALC.T, np.dot(X[1], ALC)) + Z1
    Y3 = Hxx - Z3
    Y4 = np.dot(ABK, np.dot(X[3], ABK.T)) + Z3
    Y = np.stack([Y1, Y2, Y3, Y4])
    return Y
def gdlyap(sysdata, K, L, primal=True, solver='direct', check_stable=True, P0=None, max_iters=1000):
    """
    (G)eneralized (d)iscrete-time (Lyap)unov equation solver for input-state-output
    systems with multiplicative noise.

    :param sysdata: dict, all problem data for the multiplicative-noise system.
    :param K: matrix, control gain.
    :param L: matrix, estimator gain.
    :param primal: bool, True for the 'primal' problem which yields the cost/value
        matrix, False for the 'dual' problem which yields the steady-state second
        moment matrix.
    :param solver: string. 'direct' is a one-shot solver that turns the matrix
        equation into a vector system of equations and solves with a generic
        linear solver from numpy.linalg. 'smith' is a basic iterative solver;
        most useful for large n, but convergence speed depends on the spectral
        radius. Best results when warm-starting near the solution via P0.
    :param check_stable: bool, True checks whether the closed loop is ms-stable
        and returns a matrix of np.inf if not. Only set False if (K, L) is known
        beforehand to be ms-stabilizing. (Only used by the 'direct' solver.)
    :param P0: matrix, initial guess for the Smith method.
    :param max_iters: int, number of iterations for the Smith method.
    :returns: (2n x 2n) matrix — cost matrix P if primal, second-moment matrix S
        if dual; np.inf-filled if instability is detected.
    """
    # Extract problem data
    A, B, C, a, Aa, b, Bb, c, Cc, Q, R, W, V, U, n, m, p = unpack(sysdata)
    # Intermediate quantities: closed-loop building blocks (F = A + BK - LC)
    BK = np.dot(B, K)
    LC = np.dot(L, C)
    F = A + BK - LC
    if solver == 'direct':
        # Augmented (plant + compensator) nominal transition matrix
        Phi = np.block([[A, BK],
                        [LC, F]])
        # Zero blocks reused when building the per-direction noise matrices
        zA = np.zeros_like(A)
        zBK = np.zeros_like(BK)
        zLC = np.zeros_like(LC)
        zF = np.zeros_like(F)
        if primal:
            # Build the closed-loop quadratic cost transition operator
            PhiPhi = np.kron(Phi.T, Phi.T)
            for i in range(a.size):
                PhiAa = np.block([[Aa[i], zBK],
                                  [zLC, zF]])
                PhiPhi += a[i]*np.kron(PhiAa.T, PhiAa.T)
            for i in range(b.size):
                PhiBb = np.block([[zA, np.dot(Bb[i], K)],
                                  [zLC, zF]])
                PhiPhi += b[i]*np.kron(PhiBb.T, PhiBb.T)
            for i in range(c.size):
                PhiCc = np.block([[zA, zBK],
                                  [np.dot(L, Cc[i]), zF]])
                PhiPhi += c[i]*np.kron(PhiCc.T, PhiCc.T)
            # Build the penalty matrix
            Qprime = np.block([[Q, np.zeros([n, n])],
                               [np.zeros([n, n]), np.dot(K.T, np.dot(R, K))]])
            vQprime = vec(Qprime)
            # Solve: ms-stability iff specrad(PhiPhi) < 1, then the linear
            # fixed-point system (I - PhiPhi) vec(P) = vec(Q')
            if check_stable:
                r = specrad(PhiPhi)
                if r > 1:
                    return np.full((2*n, 2*n), np.inf)
            vP = la.solve(np.eye((2*n)*(2*n)) - PhiPhi, vQprime)
            P = mat(vP)
            return P
        else:
            # Build the closed-loop second moment transition operator
            PhiPhi = np.kron(Phi, Phi)
            for i in range(a.size):
                PhiAa = np.block([[Aa[i], zBK],
                                  [zLC, zF]])
                PhiPhi += a[i]*np.kron(PhiAa, PhiAa)
            for i in range(b.size):
                PhiBb = np.block([[zA, np.dot(Bb[i], K)],
                                  [zLC, zF]])
                PhiPhi += b[i]*np.kron(PhiBb, PhiBb)
            for i in range(c.size):
                PhiCc = np.block([[zA, zBK],
                                  [np.dot(L, Cc[i]), zF]])
                PhiPhi += c[i]*np.kron(PhiCc, PhiCc)
            # Build the penalty matrix
            # Wprime = np.block([[W, np.zeros([n, n])],
            #                    [np.zeros([n, n]), np.dot(L, np.dot(V, L.T))]])  # known correct if U == 0
            Wprime = np.block([[W, np.dot(U, L.T)],
                               [np.dot(L, U.T), np.dot(L, np.dot(V, L.T))]])  # use if U != 0
            vWprime = vec(Wprime)
            # Solve
            if check_stable:
                r = specrad(PhiPhi)
                if r > 1:
                    return np.full((2*n, 2*n), np.inf)
            vS = la.solve(np.eye((2*n)*(2*n)) - PhiPhi, vWprime)
            S = mat(vS)
            return S
    elif solver == 'smith':
        # NOTE(review): this branch iterates a fixed max_iters with no
        # convergence or divergence test — confirm that callers pick
        # max_iters/P0 appropriately, or consider adding an early exit.
        # Initialize
        if P0 is None:
            P = np.zeros([n + n, n + n])
        else:
            P = np.copy(P0)
        # Form the right-hand side to iterate as a fixed-point operator
        if primal:
            def rhs(P):
                # Extract blocks of P
                P11 = P[0:n, 0:n]
                P12 = P[0:n, n:n + n]
                P21 = P[n:n + n, 0:n]
                P22 = P[n:n + n, n:n + n]
                LP22L = np.dot(L.T, np.dot(P22, L))
                # Construct the rhs as block matrix X
                X11_1 = np.dot(A.T, np.dot(P11, A)) + np.einsum('x,xji,jk,xkl->il', a, Aa, P11, Aa)
                X11_2 = np.dot(A.T, np.dot(P12, LC))
                X11_3 = X11_2.T
                X11_4 = np.dot(C.T, np.dot(LP22L, C)) + np.einsum('x,xji,jk,xkl->il', c, Cc, LP22L, Cc)
                X12_1 = np.dot(A.T, np.dot(P11, BK))
                X12_2 = np.dot(A.T, np.dot(P12, F))
                X12_3 = np.dot(LC.T, np.dot(P21, BK))
                X12_4 = np.dot(LC.T, np.dot(P22, F))
                X22_1 = np.dot(BK.T, np.dot(P11, BK)) + np.dot(K.T,
                                                               np.dot(np.einsum('x,xji,jk,xkl->il', b, Bb, P11, Bb), K))
                X22_2 = np.dot(BK.T, np.dot(P12, F))
                X22_3 = X22_2.T
                X22_4 = np.dot(F.T, np.dot(P22, F))
                X11 = Q + X11_1 + X11_2 + X11_3 + X11_4
                X12 = X12_1 + X12_2 + X12_3 + X12_4
                X21 = X12.T
                X22 = np.dot(K.T, np.dot(R, K)) + X22_1 + X22_2 + X22_3 + X22_4
                X = np.block([[X11, X12],
                              [X21, X22]])
                return X
        else:
            def rhs(S):
                # Extract blocks of S
                S11 = S[0:n, 0:n]
                S12 = S[0:n, n:n + n]
                S21 = S[n:n + n, 0:n]
                S22 = S[n:n + n, n:n + n]
                # LP22L = np.dot(L.T, np.dot(S22, L))
                KS22K = np.dot(K, np.dot(S22, K.T))
                # Construct the rhs as block matrix X
                X11_1 = np.dot(A, np.dot(S11, A.T)) + np.einsum('x,xij,jk,xlk->il', a, Aa, S11, Aa)
                X11_2 = np.dot(A, np.dot(S12, BK.T))
                X11_3 = X11_2.T
                X11_4 = np.dot(B, np.dot(KS22K, B.T)) + np.einsum('x,xij,jk,xlk->il', b, Bb, KS22K, Bb)
                X12_1 = np.dot(A, np.dot(S11, LC.T))
                X12_2 = np.dot(A, np.dot(S12, F.T))
                X12_3 = np.dot(BK, np.dot(S21, LC.T))
                X12_4 = np.dot(BK, np.dot(S22, F.T))
                X22_1 = np.dot(LC, np.dot(S11, LC.T)) + np.dot(L, np.dot(np.einsum('x,xij,jk,xlk->il', c, Cc, S11, Cc),
                                                                         L.T))
                X22_2 = np.dot(LC, np.dot(S12, F.T))
                X22_3 = X22_2.T
                X22_4 = np.dot(F, np.dot(S22, F.T))
                X11 = W + X11_1 + X11_2 + X11_3 + X11_4
                # X12 = X12_1 + X12_2 + X12_3 + X12_4  # known correct if U == 0
                X12 = np.dot(U, L.T) + X12_1 + X12_2 + X12_3 + X12_4  # use if U != 0
                X21 = X12.T
                X22 = np.dot(L, np.dot(V, L.T)) + X22_1 + X22_2 + X22_3 + X22_4
                X = np.block([[X11, X12],
                              [X21, X22]])
                return X
        # Iterate
        for i in range(max_iters):
            P = rhs(P)
        return P
def value(K, L, sysdata, *args, **kwargs):
    """
    Compute the four value matrices X for an arbitrary compensator (K, L).

    X[0], X[1] are derived from the primal (cost) matrix P and X[2], X[3] from
    the dual (second-moment) matrix S; returns np.inf-filled matrices when
    (K, L) is not mean-square stabilizing.

    :param K: control gain.
    :param L: estimator gain.
    :param sysdata: dict of problem data (must contain state dimension 'n').
    :returns: array of shape (4, n, n).
    """
    P = gdlyap(sysdata, K, L, primal=True, *args, **kwargs)
    S = gdlyap(sysdata, K, L, primal=False, *args, **kwargs)
    # Bugfix: take the state dimension from sysdata instead of relying on a
    # module-level global `n`, which only exists when the file is run as a
    # script (it is created in the __main__ block) — importing this module
    # and calling value() previously raised NameError.
    n = sysdata['n']
    # Build value matrices for arbitrary compensator ( Z == X if everything works properly)
    if np.all(np.isfinite(P)) and np.all(np.isfinite(S)):
        X = np.zeros((4, n, n))
        X[0] = P[0:n, 0:n] - P[n:n+n, n:n+n]
        X[1] = P[n:n+n, n:n+n]
        X[2] = S[0:n, 0:n] - S[n:n+n, n:n+n]
        X[3] = S[n:n+n, n:n+n]
    else:
        X = np.full((4, n, n), np.inf)
    return X
def cost(K, L, sysdata, primal=True):
    """
    Evaluate the performance criterion of compensator (K, L).

    The primal form (penalties against second moments) and the dual form
    (noise covariances against cost matrices) give equal values when the
    value computation is consistent.

    :param K: control gain.
    :param L: estimator gain.
    :param sysdata: dict of problem data.
    :param primal: bool, select which equivalent form to evaluate.
    :returns: scalar performance criterion.
    """
    X = value(K, L, sysdata)
    A, B, C, a, Aa, b, Bb, c, Cc, Q, R, W, V, U, n, m, p = unpack(sysdata)
    if primal:
        term_xx = np.dot(Q, X[2])
        term_uu = np.dot(Q + np.dot(K.T, np.dot(R, K)), X[3])
        return np.trace(term_xx + term_uu)
    term_xx = np.dot(W, X[0])
    term_yy = np.dot(W + np.dot(L, np.dot(V, L.T)), X[1])
    return np.trace(term_xx + term_yy)
def get_initial_gains(sysdata, method='perturb_are', cost_factor=10.0, scale_factor=1.01):
    """
    Generate initial compensator gains (K, L).

    :param sysdata: dict of problem data.
    :param method: 'perturb_are' perturbs the optimal Riccati gains (found by
        value iteration) until the cost is approximately cost_factor times the
        optimum; 'zero' returns all-zero gains.
    :param cost_factor: float > 1, target cost degradation for 'perturb_are'.
    :param scale_factor: float > 1, multiplicative step for growing the perturbation.
    :returns: (K, L) control and estimator gains.
    :raises ValueError: if method is not recognized.
    """
    A, B, C, a, Aa, b, Bb, c, Cc, Q, R, W, V, U, n, m, p = unpack(sysdata)
    if method == 'perturb_are':
        # Get initial gains by perturbing the Riccati solution found by value iteration
        X0 = np.stack([np.zeros([n, n]), np.zeros([n, n]), np.zeros([n, n]), np.zeros([n, n])])
        X = value_iteration(sysdata, X0)
        Kopt, Lopt = gain(X, sysdata)
        # Random perturbation directions with unit Frobenius norm
        Kp = npr.randn(*Kopt.shape)
        Kp /= la.norm(Kp, ord='fro')
        Lp = npr.randn(*Lopt.shape)
        Lp /= la.norm(Lp, ord='fro')
        # Initial perturbation scale
        scale = 0.01
        K = Kopt + scale*Kp
        L = Lopt + scale*Lp
        c_opt = cost(Kopt, Lopt, sysdata)
        c = cost(K, L, sysdata)
        # Increase perturbation scale until cost is worse than the optimum by cost_factor
        while c < cost_factor*c_opt:
            scale *= scale_factor
            K = Kopt + scale*Kp
            L = Lopt + scale*Lp
            c = cost(K, L, sysdata)
        # Take one step back to ensure cost_factor is an upper bound
        scale /= scale_factor
        K = Kopt + scale*Kp
        L = Lopt + scale*Lp
    elif method == 'zero':
        K = np.zeros([m, n])
        L = np.zeros([n, p])
    else:
        # Fix: was a bare `raise ValueError` with no message, inconsistent with
        # the descriptive ValueError messages used elsewhere in this codebase.
        raise ValueError('Invalid initial gain method: %s' % method)
    return K, L
def value_iteration(sysdata=None, X=None, tol=1e-9, dmax=1e99, max_iters=1000, verbose=False):
    """
    Solve the coupled Riccati equations by fixed-point (value) iteration.

    :param sysdata: dict of problem data (required).
    :param X: array (4, n, n), initial value matrices (default: zeros).
    :param tol: convergence tolerance on the per-matrix spectral-norm change.
    :param dmax: divergence threshold on the per-matrix change.
    :param max_iters: iteration limit.
    :param verbose: bool, print per-iteration diagnostics.
    :returns: converged value matrices, or None on divergence or timeout.
    :raises ValueError: if sysdata is None.
    """
    if sysdata is None:
        raise ValueError('sysdata must not be None!')
    n = sysdata['n']
    X_prev = np.copy(X) if X is not None else np.zeros([4, n, n])
    i = 0
    while True:
        X = ricc(X_prev, sysdata)
        d = mat_diff(X, X_prev)
        if verbose:
            spacer = '    '
            print('%6d' % i, end=spacer)
            print(d, end=spacer)  # Riccati residual
            # print(np.sort(np.real(la.eig(X_prev[0] - X[0])[0])))  # eigs of X mat_diff from last iter
            print(cost(*gain(X, sysdata), sysdata))
        if np.all(d < tol):
            return X      # converged
        if np.any(d > dmax):
            return None   # diverged
        if i >= max_iters:
            return None   # timeout
        X_prev = np.copy(X)
        i += 1
def rollout(K, L, x0, xhat0, T=1000):
    """Simulate the closed-loop multiplicative-noise system for T steps.

    K: feedback gain applied to the state estimate.
    L: output-injection (estimator) gain.
    x0, xhat0: initial state and initial state estimate.

    Returns (x, u, y, xhat): state, input, output, and estimate histories.

    NOTE(review): relies on module-level globals (A, B, C, a, Aa, b, Bb, c, Cc,
    W, V, n, m, p) that are only assigned in the __main__ block -- confirm they
    are defined before calling this from elsewhere.
    """
    # Preallocate
    x = np.zeros([T+1, n])
    u = np.zeros([T, m])
    y = np.zeros([T, p])
    xhat = np.zeros([T+1, n])
    # Initialize
    x[0] = x0
    xhat[0] = xhat0
    # Simulate
    for t in range(T):
        # Noises: sample multiplicative perturbations of A, B, C and
        # additive process/measurement noise for this step
        At = A + np.einsum('i,ijk', np.sqrt(a)*npr.randn(a.size), Aa)
        Bt = B + np.einsum('i,ijk', np.sqrt(b)*npr.randn(b.size), Bb)
        Ct = C + np.einsum('i,ijk', np.sqrt(c)*npr.randn(c.size), Cc)
        w = npr.multivariate_normal(np.zeros(n), W)
        v = npr.multivariate_normal(np.zeros(p), V)
        # Simulation step: the plant uses the noisy matrices, while the
        # compensator propagates its estimate with the nominal (A, B, C)
        y[t] = np.dot(Ct, x[t]) + v
        u[t] = np.dot(K, xhat[t])
        xhat[t+1] = np.dot(A, xhat[t]) + np.dot(B, u[t]) + np.dot(L, y[t] - np.dot(C, xhat[t]))
        x[t+1] = np.dot(At, x[t]) + np.dot(Bt, u[t]) + w
    return x, u, y, xhat
if __name__ == "__main__":
    # Demo: solve the generalized Riccati equations by value iteration and
    # compare closed-loop responses under the optimal vs. the initial gains.
    plt.close('all')
    # plt.style.use('utility/conlab.mplstyle')
    # # Random problem data
    # seed = 1
    # npr.seed(seed)
    # sysdata = make_system_data(n=4, m=3, p=2, r=0.99, s=1.0)
    # Example problem data
    seed = 1
    npr.seed(seed)
    sysdata = example_sysdata(beta=0.2)
    A, B, C, a, Aa, b, Bb, c, Cc, Q, R, W, V, U, n, m, p = unpack(sysdata)
    # Initialize value matrix
    # X0 = np.zeros((4, n, n))
    # K0, L0 = gain(X0, sysdata)
    K0, L0 = get_initial_gains(sysdata, cost_factor=100.0)
    X0 = value(K0, L0, sysdata)  # Initialize with value matrices from an initial ms-stabilizing compensator
    # Value iteration
    print('value iteration')
    X = value_iteration(sysdata, X0, verbose=True)
    print('')
    # Compute gains and the compensator model matrix F = A + B K - L C
    K, L = gain(X, sysdata)
    BK = np.dot(B, K)
    LC = np.dot(L, C)
    F = A + BK - LC
    # Compare initial vs. optimized cost
    print(cost(K0, L0, sysdata))
    print(cost(K, L, sysdata))
    # Simulation
    # Initial state and state estimate
    x0 = 10*(npr.choice([2, -2]) + 2*npr.rand(n) - 1)
    # xhat0 = 10*(2*npr.rand(n) - 1)
    xhat0 = np.copy(x0)
    # Set up plot
    fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
    # Closed-loop response w/ optimal gains
    x, u, y, xhat = rollout(K, L, x0, xhat0, T=1000)
    ax[0].plot(x, alpha=0.5)
    ax[0].set_xlabel('time')
    ax[0].set_ylabel('states')
    # Closed-loop response w/ initial gains
    x, u, y, xhat = rollout(K0, L0, x0, xhat0, T=1000)
    ax[1].plot(x, alpha=0.5)
    ax[1].set_xlabel('time')
    ax[1].set_ylabel('states')
    fig.tight_layout()
| 19,716 | 33.290435 | 120 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/misc.py
|
def get_entry(arr, i, j, p, m):
    """Index into the axes array returned by plt.subplots(nrows=p, ncols=m).

    Matplotlib squeezes singleton dimensions, so arr may be a 2D array, a 1D
    array, or a bare object; pick the (i, j) entry accordingly.
    """
    if p > 1 and m > 1:
        return arr[i, j]
    if p > 1:
        return arr[i]
    if m > 1:
        return arr[j]
    return arr
| 442 | 26.6875 | 69 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/sysid.py
|
# Copied & modified from the files
# SIPPY/sippy/__init__.py
# SIPPY/sippy/OLSims_methods.py
# in the SIPPY package
import sys
import numpy as np
from sippy.OLSims_methods import SS_model, check_types, check_inputs, extracting_matrices, forcing_A_stability, \
SVD_weighted, algorithm_1, SS_lsim_process_form
from sippy.functionsetSIM import rescale, old_div, Vn_mat, K_calc
def system_identification(y, u, id_method='N4SID',
                          tsample=1.0, SS_f=None, SS_threshold=0.1,
                          SS_max_order=np.NaN, SS_fixed_order=np.NaN,
                          SS_D_required=False, SS_A_stability=False,
                          return_residuals=False):
    """Identify a state-space model from I/O data (copied & modified from SIPPY).

    y, u: output and input data; re-oriented internally to (channels, samples)
          and truncated to a common length if they disagree.
    SS_f: subspace horizon; defaults to SS_fixed_order when not given.
    return_residuals: if True, also return the state/output residuals exposed
          by this fork of OLSims (used for bootstrap uncertainty estimation).

    Raises ValueError if neither SS_f nor a finite SS_fixed_order is supplied
    (the original code fell through with SS_f=None and crashed opaquely
    inside OLSims).
    """
    y = 1.0 * np.atleast_2d(y)
    u = 1.0 * np.atleast_2d(u)
    # Orient data as (channels, samples): transpose when rows >= columns
    [n1, n2] = y.shape
    if n1 >= n2:
        y = y.T
    [n1, n2] = u.shape
    if n1 >= n2:
        u = u.T
    ylength = y.shape[1]
    ulength = u.shape[1]
    # Checking data consistency
    if ulength != ylength:
        sys.stdout.write("\033[0;35m")
        print("Warning! y and u lengths are not the same. The minor value between the two lengths has been chosen. "
              "The performed identification may be not correct, be sure to check your input and output data alignment")
        sys.stdout.write(" ")
    # Recasting data cutting out the over numbered data
    minlength = min(ulength, ylength)
    y = y[:, :minlength]
    u = u[:, :minlength]
    if SS_f is None:
        if np.isfinite(SS_fixed_order):
            SS_f = SS_fixed_order
        else:
            # Fail loudly rather than passing f=None into OLSims
            raise ValueError("Either SS_f or a finite SS_fixed_order must be specified.")
    A, B, C, D, Vn, Q, R, S, K, res = OLSims(y, u, SS_f, id_method, SS_threshold,
                                             SS_max_order, SS_fixed_order,
                                             SS_D_required, SS_A_stability)
    model = SS_model(A, B, C, D, K, Q, R, S, tsample, Vn)
    if return_residuals:
        return model, res
    return model
def OLSims(y, u, f, weights='N4SID', threshold=0.1, max_order=np.NaN, fixed_order=np.NaN,
           D_required=False, A_stability=False):
    """Open-loop subspace identification (copied & modified from SIPPY OLSims_methods).

    Modified from upstream to also return the residuals, which are used
    downstream for bootstrap uncertainty estimation.

    Returns (A, B, C, D, Vn, Q, R, S, K, residuals). On invalid arguments,
    returns a 10-tuple of placeholders with Vn = np.inf.
    """
    y = 1. * np.atleast_2d(y)
    u = 1. * np.atleast_2d(u)
    l, L = y.shape
    m = u[:, 0].size
    if not check_types(threshold, max_order, fixed_order, f):
        # Bug fix: the original failure branch returned only 9 values, while
        # the success branch (and every caller) expects 10; append an empty
        # residuals placeholder so unpacking does not crash.
        return (np.array([[0.0]]), np.array([[0.0]]), np.array([[0.0]]), np.array([[0.0]]),
                np.inf, [], [], [], [], [])
    threshold, max_order = check_inputs(threshold, max_order, fixed_order, f)  # threshold, max_order = 0, fixed_order
    N = L - 2*f + 1
    # Standardization of the input/output data is deliberately disabled here
    # (unit scale factors); the upstream SIPPY rescale() calls were removed.
    Ustd = np.ones(m)
    Ystd = np.ones(l)
    U_n, S_n, V_n, W1, O_i = SVD_weighted(y, u, f, l, weights)
    Ob, X_fd, M, n, residuals = algorithm_1(y, u, l, m, f, N, U_n, S_n, V_n, W1, O_i, threshold,
                                            max_order, D_required)
    if A_stability:
        M, residuals[0:n, :], useless = forcing_A_stability(M, n, Ob, l, X_fd, N, u, f)
    A, B, C, D = extracting_matrices(M, n)
    # Noise covariances from the residual sample covariance
    Covariances = old_div(np.dot(residuals, residuals.T), (N - 1))
    Q = Covariances[0:n, 0:n]
    R = Covariances[n:, n:]
    S = Covariances[0:n, n:]
    X_states, Y_estimate = SS_lsim_process_form(A, B, C, D, u)
    Vn = Vn_mat(y, Y_estimate)
    K, K_calculated = K_calc(A, C, Q, R, S)
    # Undo the (identity) input/output scaling on the identified matrices
    for j in range(m):
        B[:, j] = old_div(B[:, j], Ustd[j])
        D[:, j] = old_div(D[:, j], Ustd[j])
    for j in range(l):
        C[j, :] = C[j, :] * Ystd[j]
        D[j, :] = D[j, :] * Ystd[j]
        if K_calculated:
            K[:, j] = old_div(K[:, j], Ystd[j])
    return A, B, C, D, Vn, Q, R, S, K, residuals
| 4,473 | 36.915254 | 122 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/config.py
|
import os
# Directory containing this module
this_dir, this_filename = os.path.split(__file__)
# Default location for experiment outputs: an 'experiments' directory one level up
EXPERIMENT_FOLDER = os.path.join(this_dir, '..', 'experiments')
| 126 | 20.166667 | 63 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/compensator_design.py
|
"""
Functions for linear dynamic compensator design.
"""
from time import time
from dataclasses import dataclass
import numpy as np
from utility.matrixmath import mdot, specrad, minsv, lstsqb, dlyap, dare, dare_gain
from utility.printing import create_tag
from rocoboom_out.common.gdare import value_iteration, gain
@dataclass
class Compensator:
    """
    Linear dynamic output-feedback compensator: (F, K, L)
        xhat[t+1] = F @ xhat[t] + L @ y[t]
        u[t] = K @ xhat[t]
    """
    F: np.ndarray  # compensator state-transition matrix
    K: np.ndarray  # feedback gain applied to the internal state estimate
    L: np.ndarray  # output-injection (estimator) gain
def sysmat_cl(A, B, C, K, L):
    """Model matrix F = A + B K - L C used by the compensator to propagate its state."""
    BK = np.dot(B, K)
    LC = np.dot(L, C)
    return A + BK - LC
def aug_sysmat_cl(A, B, C, F, K, L):
    """Closed-loop system matrix for the augmented state [x; xhat]."""
    BK = np.dot(B, K)
    LC = np.dot(L, C)
    return np.block([[A, BK],
                     [LC, F]])
def make_ce_compensator(model, Y, R):
    """Certainty-equivalence LQG compensator for the given state-space model.

    Y is the output penalty (converted to a state penalty via C) and R the
    input penalty. Control and estimation Riccati equations are solved
    separately, per the separation principle.
    """
    # Model information
    A, B, C = model.A, model.B, model.C
    W, V, U = model.Q, model.R, model.S
    # Convert output penalty to state penalty in model coordinates
    Q = np.dot(C.T, np.dot(Y, C))
    # Control Riccati equation
    P, K = dare_gain(A, B, Q, R)
    # Estimation Riccati equation (dual problem), with sign convention flipped
    S, L = dare_gain(A.T, C.T, W, V, E=None, S=U)
    L = -L.T
    # Compensator model matrix and structure
    F = sysmat_cl(A, B, C, K, L)
    return Compensator(F, K, L)
def make_compensator(model, uncertainty, Y, R, noise_pre_scale=1.0, noise_post_scale=1.0,
                     bisection_epsilon=0.01, log_diagnostics=False):
    """Design a (possibly multiplicative-noise-robust) dynamic compensator.

    model: identified state-space model with attributes A, B, C, Q, R, S.
    uncertainty: multiplicative-noise variance estimate (attributes a, Aa, b,
        Bb, c, Cc), or None for plain certainty-equivalence design.
    Y, R: output and input penalty matrices.
    noise_pre_scale / noise_post_scale: multipliers on the assumed noise
        variance before/after the feasibility bisection.
    bisection_epsilon: termination width for the bisection on the noise scale.

    Returns (compensator, scale, tag_list) where scale is the noise-variance
    scaling actually used (0.0 when falling back to certainty equivalence)
    and tag_list holds diagnostic messages when log_diagnostics is True.
    """
    if log_diagnostics:
        time_start = time()
    # Model information
    A = model.A
    B = model.B
    C = model.C
    n, m, p = A.shape[0], B.shape[1], C.shape[0]
    # TODO make sure the scaling is right for these since docs say they are with respect to outputs with unit variance
    W = model.Q
    V = model.R
    U = model.S
    # Penalty information
    Q = np.dot(C.T, np.dot(Y, C))  # Convert output penalty to state penalty in model coordinates
    tag_list = []
    # TODO account for correlation between A, B, C multiplicative noises in gdare
    def solve_gdare(sysdata, X0=None, solver=None, solver_kwargs=None):
        # Solve the generalized DARE; returns None when no solution is found
        if solver is None:
            solver = value_iteration
        if solver_kwargs is None:
            solver_kwargs = dict(tol=1e-6, max_iters=400)
        return solver(sysdata, X0, **solver_kwargs)
    if uncertainty is None or noise_post_scale == 0:
        # No uncertainty info (or it is disabled): certainty-equivalence design
        compensator = make_ce_compensator(model, Y, R)
        scale = 0.0
    else:
        # Uncertainty information
        a = uncertainty.a
        Aa = uncertainty.Aa
        b = uncertainty.b
        Bb = uncertainty.Bb
        c = uncertainty.c
        Cc = uncertainty.Cc
        def make_sysdata(scale=1.0):
            # Prep data for GDARE solver
            return dict(A=A, B=B, C=C,  # Mean
                        a=scale*a, Aa=Aa, b=scale*b, Bb=Bb, c=scale*c, Cc=Cc,  # Variance
                        Q=Q, R=R, W=W, V=V, U=U,  # Penalties
                        n=n, m=m, p=p)  # Dimensions
        cs_lwr = 1.0
        scale = cs_lwr*noise_pre_scale
        sysdata = make_sysdata(scale=scale)
        # Warm-start from noiseless case
        # TODO use dare() instead, find expression for P, Phat, S, Shat = X[0], X[1], X[2], X[3]
        X0 = solve_gdare(make_sysdata(scale=0))
        X = solve_gdare(sysdata, X0)
        if X is None:
            # If assumed multiplicative noise variance is too high to admit solution, decrease noise variance
            # Bisection on the noise variance scaling to find the control
            # when the noise just touches the stability boundary
            cs_upr = 1.0
            cs_lwr = 0.0
            while cs_upr - cs_lwr > bisection_epsilon:
                if log_diagnostics:
                    tag_list.append(create_tag("[bisection_lwr bisection_upr] = [%.6f %.6f]" % (cs_lwr, cs_upr)))
                cs_mid = (cs_upr + cs_lwr)/2
                scale = cs_mid*noise_pre_scale
                sysdata = make_sysdata(scale=scale)
                X = solve_gdare(sysdata, X0)
                if X is None:
                    # Infeasible at this scale: shrink from above
                    cs_upr = cs_mid
                else:
                    # Feasible: raise the lower bound and warm-start from here
                    cs_lwr = cs_mid
                    X0 = np.copy(X)
            scale = cs_lwr*noise_pre_scale
            if log_diagnostics:
                tag_list.append(create_tag('Scaled noise variance by %.6f' % scale))
            if scale > 0:
                sysdata = make_sysdata(scale=scale)
                X = solve_gdare(sysdata, X0)
                if X is None:
                    # Solver failed at a scale the bisection deemed feasible;
                    # fall back to certainty equivalence and return early
                    if log_diagnostics:
                        tag_list.append(create_tag('GAIN NOT FOUND BY DARE_MULT, INCREASE SOLVER PRECISION',
                                                   message_type='fail'))
                        tag_list.append(create_tag('Falling back on cert-equiv gain', message_type='fail'))
                    compensator = make_ce_compensator(model, Y, R)
                    scale = 0.0
                    return compensator, scale, tag_list
            else:
                # No positive feasible scale found: certainty equivalence
                if log_diagnostics:
                    tag_list.append(create_tag('Bisection collapsed to cert-equiv'))
                compensator = make_ce_compensator(model, Y, R)
                scale = 0.0
                return compensator, scale, tag_list
        if noise_post_scale < 1:
            # Shrink the noise away from the stability boundary and re-solve
            scale = cs_lwr*noise_pre_scale*noise_post_scale
            sysdata = make_sysdata(scale=scale)
            X = solve_gdare(sysdata, X0)
            if X is None:
                raise Exception('MLQG problem did not solve, check ms-compensatability! This should not have happened...')
        # P, Phat, S, Shat = X[0], X[1], X[2], X[3]
        # Get gains
        K, L = gain(X, sysdata)
        # Create the model matrix
        F = sysmat_cl(A, B, C, K, L)
        compensator = Compensator(F, K, L)
    if log_diagnostics:
        time_end = time()
        time_elapsed = time_end - time_start
        tag_list.append(create_tag("time to make compensator: %f" % time_elapsed))
    return compensator, scale, tag_list
| 6,203 | 32.354839 | 119 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/__init__.py
| 0 | 0 | 0 |
py
|
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/signal_gen.py
|
from dataclasses import dataclass, field
import numpy as np
import numpy.random as npr
from SIPPY.sippy import functionset as fset
@dataclass
class SigParam:
    """Parameters for one additive component of a generated signal (see make_sig)."""
    method: str  # one of 'gbn', 'wgn', 'rgw', 'zeros', 'ones'
    mean: float = None  # if not None, shift the signal so its sample mean equals this
    scale: float = 1.0  # multiplicative scaling applied after centering
    ma_length: int = None  # if set, smooth with a moving average of this window length
    options: dict = field(default_factory=dict)  # method-specific options, e.g. 'p_swd' for 'gbn'
def moving_average(x, w):
    """Length-w moving average of x ('valid' mode: output length is len(x) - w + 1)."""
    window = np.ones(w)
    return np.convolve(x, window, 'valid') / w
def make_sig(T, m=1, params=None):
    """Create a (T, m) signal, e.g. for exploratory control actions.

    Each SigParam in params contributes one additive component per channel:
    a raw signal of the requested kind, optionally smoothed by a moving
    average, re-centered to a given mean, and scaled.
    """
    if params is None:
        params = [SigParam(method='gbn')]
    u_hist = np.zeros([T, m])
    for i in range(m):
        for param in params:
            # Generate extra samples so the 'valid' moving average has length T
            if param.ma_length:
                Ti = T + param.ma_length - 1
            else:
                Ti = T
            # Raw signal component
            if param.method == 'gbn':
                # Binary switching signal with switching probability p_swd
                p_swd = param.options.get('p_swd', 0.1)
                sig = fset.GBN_seq(Ti, p_swd=p_swd)[0]
            elif param.method == 'wgn':
                # White Gaussian noise
                u_explore_var = 1.0
                sig = np.sqrt(u_explore_var)*npr.randn(Ti)
            elif param.method == 'rgw':
                # Random Gaussian walk
                sig = fset.RW_seq(Ti, 0, sigma=1.0)
            elif param.method == 'zeros':
                sig = np.zeros(Ti)
            elif param.method == 'ones':
                sig = np.ones(Ti)
            else:
                raise ValueError('Invalid signal generation method!')
            # Moving-average smoothing
            if param.ma_length:
                sig = moving_average(sig, param.ma_length)
            # Centering
            if param.mean is not None:
                sig += param.mean - np.mean(sig)
            # Scaling
            sig *= param.scale
            # Accumulate this component into the channel
            u_hist[:, i] += sig
    return u_hist
if __name__ == '__main__':
    # Demo: plot a signal built from two smoothed binary components and a
    # smoothed random-walk component.
    import matplotlib.pyplot as plt
    plt.close('all')
    npr.seed(1)
    params = [SigParam(method='gbn', mean=0.0, scale=1.0, ma_length=4),
              SigParam(method='gbn', mean=0.0, scale=1.0, ma_length=8),
              SigParam(method='rgw', mean=0.0, scale=0.2, ma_length=20)]
    sig = make_sig(T=100, params=params)
    plt.plot(sig)
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/compensator_eval.py
|
"""
Functions for linear dynamic compensator evaluation.
"""
from dataclasses import dataclass
import numpy as np
from utility.matrixmath import mdot, specrad, minsv, solveb, lstsqb, dlyap, dare, dare_gain
from rocoboom_out.common.compensator_design import aug_sysmat_cl
def specrad_cl(A, B, C, F, K, L):
    """Spectral radius of the closed-loop augmented system matrix."""
    return specrad(aug_sysmat_cl(A, B, C, F, K, L))
@dataclass
class Performance:
    """Closed-loop performance summary produced by compute_performance."""
    sr: float  # Spectral radius of the closed-loop augmented system matrix
    ihc: float  # Infinite horizon cost (np.inf when the loop is not stable)
def compute_performance(A, B, C, Q, R, W, V, F, K, L, primal=True):
    """Compute the closed-loop spectral radius and infinite-horizon cost.

    The plant (A, B, C) with penalties (Q, R) and noise covariances (W, V)
    is driven by the compensator (F, K, L).

    primal=True: cost = trace(W' P') from the steady-state value matrix;
    primal=False: cost = trace(Q' S') from the steady-state covariance.
    Theoretically these two quantities should be equal if the problem is
    well-posed.

    Returns a Performance(sr, ihc); ihc is np.inf when the loop is unstable.
    """
    # Compute the closed-loop spectral radius and performance criterion
    n, m, p = A.shape[0], B.shape[1], C.shape[0]
    Phi = aug_sysmat_cl(A, B, C, F, K, L)
    sr = specrad(Phi)
    # Bug fix: treat marginal stability (sr == 1) as unstable as well. The
    # original check `sr > 1` would attempt to solve a singular discrete
    # Lyapunov equation when sr == 1, where the cost diverges anyway.
    if sr >= 1:
        ihc = np.inf
    else:
        # Augmented state penalty: state cost Q plus input cost mapped through K
        KRK = np.dot(K.T, np.dot(R, K))
        Qprime = np.block([[Q, np.zeros([n, n])],
                           [np.zeros([n, n]), KRK]])
        # Augmented noise covariance: process noise W plus measurement noise mapped through L
        LVL = np.dot(L, np.dot(V, L.T))
        Wprime = np.block([[W, np.zeros([n, n])],
                           [np.zeros([n, n]), LVL]])
        if primal:
            # Primal i.e. compute performance criterion from steady-state value matrix
            Pprime = dlyap(Phi.T, Qprime)
            ihc = np.trace(np.dot(Wprime, Pprime))
        else:
            # Dual i.e. compute performance criterion from steady-state covariance matrix
            Sprime = dlyap(Phi, Wprime)
            ihc = np.trace(np.dot(Qprime, Sprime))
    return Performance(sr, ihc)
| 1,591 | 28.481481 | 91 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/monte_carlo_loader.py
|
"""
Robust adaptive control via multiplicative noise from bootstrapped uncertainty estimates.
"""
# Authors: Ben Gravell and Tyler Summers
import os
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
from utility.pickle_io import pickle_import
from rocoboom_out.common.config import EXPERIMENT_FOLDER
from rocoboom_out.common.problem_data_gen import load_system
from rocoboom_out.common.plotting import multi_plot_paper
# from rocoboom_out.common.plotting import multi_plot
def cat_dict(base, layer):
    """Recursively merge `layer` into `base` in place.

    For each key of base: matching ndarray values are vstacked, matching list
    values have the layer's list appended as a single element, and nested
    dicts recurse. Entries whose types do not match (or are unsupported) are
    silently left unchanged.
    """
    for key in base:
        base_val = base[key]
        layer_val = layer[key]
        kinds = (type(base_val), type(layer_val))
        if kinds == (dict, dict):
            cat_dict(base_val, layer_val)
        elif kinds == (np.ndarray, np.ndarray):
            base[key] = np.vstack([base_val, layer_val])
        elif kinds == (list, list):
            base_val.append(layer_val)
    return
def load_results(experiment):
    """Load the pickled comparison results for one experiment directory.

    Returns (output_dict, cost_are_true, t_hist, t_start_estimate, t_evals).

    NOTE(review): uses the global `experiment_folder`, which is only assigned
    in the __main__ block below -- confirm it is set before importing this
    function from another module.
    """
    dirname_in = os.path.join(experiment_folder, experiment)
    filename_in = 'comparison_results' + '.pickle'
    path_in = os.path.join(dirname_in, filename_in)
    data_in = pickle_import(path_in)
    output_dict, cost_are_true, t_hist, t_start_estimate, t_evals = data_in
    return output_dict, cost_are_true, t_hist, t_start_estimate, t_evals
def aggregate_experiment_results(experiments):
    """Load and concatenate results from several experiment directories.

    Per-sample outputs are concatenated via cat_dict; the remaining metadata
    (cost_are_true, t_hist, ...) is taken from the last experiment loaded.
    """
    output_dict = None
    for experiment in experiments:
        loaded = load_results(experiment)
        output_dict_i, cost_are_true, t_hist, t_start_estimate, t_evals = loaded
        if output_dict is None:
            output_dict = copy(output_dict_i)
        else:
            cat_dict(output_dict, output_dict_i)
    return output_dict, cost_are_true, t_hist, t_start_estimate, t_evals
def load_problem_data(experiment):
    """Load the simulation options and system definition for an experiment.

    Returns a list: the ten simulation options followed by the thirteen
    system quantities (n, m, p, A, B, C, D, Y, Q, R, W, V, U).

    NOTE(review): uses the global `experiment_folder`, set in __main__.
    """
    dirname_in = os.path.join(experiment_folder, experiment)
    # Load simulation options
    sim_options = pickle_import(os.path.join(dirname_in, 'sim_options.pickle'))
    # training_type / testing_type are present in sim_options but unused here
    option_keys = ['Ns', 'Nb', 'T', 'system_idx', 'seed',
                   'bisection_epsilon', 't_start_estimate', 't_explore',
                   'u_explore_var', 'u_exploit_var']
    option_values = [sim_options[key] for key in option_keys]
    # Load system definition
    n, m, p, A, B, C, D, Y, Q, R, W, V, U = load_system(os.path.join(dirname_in, 'system_dict.pickle'))
    return option_values + [n, m, p, A, B, C, D, Y, Q, R, W, V, U]
def get_last_dir(parent_folder):
    """Return the name of the immediate subdirectory with the largest leading
    timestamp token (digits with 'p' as the decimal point, before the first '_').

    Subdirectories whose name does not start with a parseable number are
    skipped. Returns None if no subdirectory parses.
    """
    num_max = 0
    name_max = None
    for name in next(os.walk(parent_folder))[1]:
        try:
            num = float(name.split('_')[0].replace('p', '.'))
        except ValueError:
            # Skip non-timestamped directories. (The original bare `except`
            # also silently swallowed unrelated errors.)
            continue
        if num > num_max:
            num_max = num
            name_max = name
    return name_max
def get_dirs_by_time(parent_folder, time_min, time_max):
    """Return names of immediate subdirectories whose leading timestamp token
    falls within [time_min, time_max + 1].

    The `+ 1` slack on the upper bound is preserved from the original code;
    NOTE(review): confirm whether the bound should be exactly time_max.
    Non-parseable directory names are skipped.
    """
    dirs = []
    for name in next(os.walk(parent_folder))[1]:
        try:
            num = float(name.split('_')[0].replace('p', '.'))
        except ValueError:
            # Skip non-timestamped directories. (The original bare `except`
            # also silently swallowed unrelated errors.)
            continue
        if time_min <= num <= time_max+1:
            dirs.append(name)
    return dirs
if __name__ == "__main__":
    # Load results from one or more completed Monte Carlo experiments,
    # aggregate them, and produce the paper plots.
    experiment_folder = EXPERIMENT_FOLDER
    # experiment = get_last_dir(experiment_folder)
    # experiment = '1637467652p5037403_Ns_10000_T_321_system_1_seed_1'
    # experiment = '1637595430p9019032_Ns_10000_T_321_system_1_seed_2'
    # experiment = '1637679639p3378303_Ns_10000_T_321_system_1_seed_10'
    # experiment = '1637770954p6599007_Ns_10_T_321_system_1_seed_1'
    # # Load the problem data into the main workspace
    # # (not needed for plotting, just for convenience of data inspection using console)
    # (Ns, Nb, T, system_idx, seed,
    #  bisection_epsilon, t_start_estimate, t_explore, u_explore_var, u_exploit_var,
    #  n, m, p, A, B, C, D, Y, Q, R, W, V, U) = load_problem_data(experiment)
    # Load results
    # output_dict, cost_are_true, t_hist, t_start_estimate, t_evals = load_results(experiment)
    # Load results from multiple experiments
    # experiments = ['1637770954p6599007_Ns_10_T_321_system_1_seed_1',
    #                '1637770969p3941662_Ns_10_T_321_system_1_seed_2',
    #                '1637770985p5349584_Ns_10_T_321_system_1_seed_3']
    # experiments = get_dirs_by_time(experiment_folder, 1637770954, 1637770985)  # 10
    # Select experiment directories by their timestamp range
    experiments = get_dirs_by_time(experiment_folder, 1637467652, 1637679639)  # 10000
    experiments.reverse()  # This is only because seed=1 has extra fields that should not be catted, redo seed=1 with current code to eliminate this line
    #
    output_dict, cost_are_true, t_hist, t_start_estimate, t_evals = aggregate_experiment_results(experiments)
    # # Print the log data, if it exists
    # show_diagnostics = False
    # if show_diagnostics:
    #     try:
    #         for log_str in output_dict['robust']['log_str']:
    #             print(log_str)
    #     except:
    #         pass
    dirname_out = experiment_folder
    # Plotting
    plt.close('all')
    multi_plot_paper(output_dict, cost_are_true, t_hist, t_start_estimate, t_evals,
                     plotfun_str='plot', show_print=True, show_plot=True,
                     save_plots=False, dirname_out=dirname_out)
    # multi_plot_paper(output_dict, cost_are_true, t_hist, t_start_estimate, t_evals,
    #                  plotfun_str='plot', show_print=True, show_plot=True,
    #                  show_legend=False, show_grid=True,
    #                  save_plots=False, dirname_out=dirname_out)
    # multi_plot_paper(output_dict, cost_are_true, t_hist, t_start_estimate, t_evals,
    #                  plotfun_str='plot', show_print=True, show_plot=True,
    #                  show_legend=True, show_grid=False,
    #                  save_plots=False, dirname_out=dirname_out)
    plt.show()
| 6,271 | 37.012121 | 153 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/single.py
|
from dataclasses import dataclass
import numpy as np
import numpy.random as npr
import scipy as sc
import numpy.linalg as la
import scipy.linalg as sla
import matplotlib.pyplot as plt
import control
from utility.lti import ctrb, obsv
from utility.printing import create_tag
from rocoboom_out.common.problem_data_gen import gen_system_omni
from rocoboom_out.common.signal_gen import SigParam, make_sig
from rocoboom_out.common.sim import make_offline_data, lsim_cl
from rocoboom_out.common.sysid import system_identification
from rocoboom_out.common.ss_tools import make_ss, ss_change_coordinates
from rocoboom_out.common.uncertainty import estimate_model_uncertainty
from rocoboom_out.common.compensator_design import make_compensator
from rocoboom_out.common.compensator_eval import compute_performance
from rocoboom_out.common.misc import get_entry
@dataclass
class Result:
    """Bundle of outputs for one control synthesis method (see get_result)."""
    model: None  # identified model used for design (the true one for 'opt')
    uncertainty: None  # multiplicative-noise uncertainty estimate, or None
    ss_models_boot: None  # bootstrap model samples, or None
    compensator: None  # designed Compensator (F, K, L)
    performance: None  # Performance of the compensator on the true system
    design_info: dict  # extra diagnostics, e.g. the noise_scale used in design
def get_result(method):
    """Design and evaluate a compensator for one synthesis method.

    method: 'opt' (design on the true system), 'ceq' (certainty equivalence
    on the identified model), or 'rob' (robust design using bootstrapped
    multiplicative-noise uncertainty).

    NOTE(review): relies on module-level globals assigned in __main__
    (ss_true, ss_model, model, u_train_hist, y_train_hist, w_est, v_est, t,
    Nb, Y, R, noise_pre_scale, noise_post_scale, bisection_epsilon, and the
    true system matrices A, B, C, Q, W, V).
    """
    ss_models_boot = None
    if method == 'opt':
        uncertainty = None
        my_model = ss_true
    elif method == 'ceq':
        uncertainty = None
        my_model = ss_model
    elif method == 'rob':
        uncertainty_estimator = 'semiparametric_bootstrap'
        # uncertainty_estimator = 'block_bootstrap'
        uncertainty_dict = estimate_model_uncertainty(model, u_train_hist, y_train_hist, w_est, v_est, t, Nb,
                                                      uncertainty_estimator, return_models=True, log_diagnostics=True)
        # TODO DEBUG ONLY
        # CHEAT by using the true model and true residuals in the semiparametric bootstrap
        # uncertainty_estimator = 'semiparametric_bootstrap'
        # uncertainty_dict = estimate_model_uncertainty(ss_true, u_train_hist, y_train_hist,
        #                                               w_hist, v_hist, t, Nb,
        #                                               uncertainty_estimator,
        #                                               return_models=True,
        #                                               log_diagnostics=True)
        uncertainty = uncertainty_dict['uncertainty']
        ss_models_boot = uncertainty_dict['models_boot']
        tag_list_uc = uncertainty_dict['tag_list']
        for tag in tag_list_uc:
            print(tag)
        my_model = ss_model
    else:
        raise ValueError
    # Design the compensator for the chosen model/uncertainty pair
    compensator, noise_scale, tag_str_list_cg = make_compensator(my_model, uncertainty, Y, R,
                                                                 noise_pre_scale, noise_post_scale, bisection_epsilon, log_diagnostics=True)
    for tag in tag_str_list_cg:
        print(tag)
    F, K, L = compensator.F, compensator.K, compensator.L
    # Evaluate the compensator against the true system
    performance = compute_performance(A, B, C, Q, R, W, V, F, K, L)
    design_info = dict(noise_scale=noise_scale)
    return Result(model, uncertainty, ss_models_boot, compensator, performance, design_info)
def response_plot(ss, T=None, y_ref=None, fig=None, axs=None, response_type='impulse', *args, **kwargs):
    """Plot the impulse or step response of a state-space system, one subplot
    per (output, input) channel pair.

    y_ref: optional reference responses to subtract before plotting.
    fig/axs: existing figure/axes to draw onto; created when fig is None.
    Extra args/kwargs are forwarded to ax.plot.

    Returns (t, ys, fig, axs).

    NOTE(review): uses module-level globals p, m for the subplot grid -- set
    in __main__. Also, `y -= yr` subtracts in place, so when y_ref is given
    the returned `ys` array is mutated -- confirm callers expect this.
    """
    # Choose the response function
    if response_type == 'impulse':
        response_fun = control.impulse_response
    elif response_type == 'step':
        response_fun = control.step_response
    else:
        raise ValueError
    # Get the response data
    t, ys = response_fun(ss, T)
    # Create plot
    if fig is None:
        fig, axs = plt.subplots(nrows=p, ncols=m, sharex=True, figsize=(4, 3))
    for i in range(p):
        for j in range(m):
            ax = get_entry(axs, i, j, p, m)
            y = get_entry(ys, i, j, p, m)
            if y_ref is not None:
                yr = get_entry(y_ref, i, j, p, m)
                y -= yr
            ax.plot(t, y, *args, **kwargs)
    fig.tight_layout()
    return t, ys, fig, axs
def comparison_response_plot(ss_true, ss_model, ss_models_boot, t_sim=None, num_models_boot_to_plot=100):
    """Overlay impulse responses of the true system, the nominal identified
    model, and a limited number of bootstrap model samples.

    Responses are plotted relative to the true response (via y_ref).
    Returns (fig, axs).

    NOTE(review): uses module-level globals T, p, m set in __main__.
    """
    # Simulation time
    if t_sim is None:
        t_sim = T
    t, ys_true = control.impulse_response(ss_true, t_sim)
    t, ys_true, fig, axs = response_plot(ss_true, t_sim, ys_true, marker='o', alpha=0.7, zorder=10, label='True')
    t, ys_model, fig, axs = response_plot(ss_model, t_sim, ys_true, fig, axs, marker='d', alpha=0.7, zorder=11, label='Nominal')
    for i, ss_model_boot in enumerate(ss_models_boot):
        # Plot at most num_models_boot_to_plot bootstrap models.
        # (Bug fix: the original used `i > num_models_boot_to_plot`, an
        # off-by-one that plotted one extra model.)
        if i >= num_models_boot_to_plot:
            break
        label = 'Bootstrap samples' if i == 0 else None
        t, ys_boot, fig, axs = response_plot(ss_model_boot, t_sim, ys_true, fig, axs, color='k', alpha=0.05, zorder=1, label=label)
    for i in range(p):
        for j in range(m):
            my_ax = get_entry(axs, i, j, p, m)
            my_ax.legend()
    return fig, axs
def comparison_closed_loop_plot(result_dict, t_sim=None, disturb_method='zeros', disturb_scale=1.0):
    """Plot closed-loop output responses of the true system under each
    method's compensator, from a common initial state and disturbances.

    result_dict: mapping from method name to Result (see get_result).
    disturb_method/disturb_scale: passed to make_sig to build the process
    and measurement disturbance histories.

    Returns (fig, axs).

    NOTE(review): uses module-level globals n, p, T, methods, ss_true set
    in __main__.
    """
    # Make initial state
    x0 = np.ones(n)
    # Simulation time
    if t_sim is None:
        t_sim = T
    # Make disturbance histories
    w_play_hist = make_sig(t_sim, n, [SigParam(method=disturb_method, mean=0, scale=disturb_scale)])
    v_play_hist = make_sig(t_sim, p, [SigParam(method=disturb_method, mean=0, scale=disturb_scale)])
    fig, axs = plt.subplots(ncols=p, sharex=True, figsize=(8, 6))
    if p == 1:
        # plt.subplots returns a bare axis when ncols == 1; normalize to a list
        axs = [axs]
    for key in methods:
        result = result_dict[key]
        x_hist, u_hist, y_hist, xhat_hist = lsim_cl(ss_true, result.compensator, x0, w_play_hist, v_play_hist, t_sim)
        for i in range(p):
            axs[i].plot(y_hist[:, i], label=key)
    for i in range(p):
        axs[i].legend()
    fig.suptitle('Comparison of closed-loop response to initial state')
    return fig, axs
def matrix_distplot(M_true, M_model, M_models_boot, title_str=None, plot_type='violin',
                    show_true=True, show_model=True, show_boot_mean=False, show_boot_median=False,
                    show_samples=True, show_dist=True, show_legend=True):
    """Plot, entry by entry, the bootstrap distribution of a system matrix
    against its true and nominal-model values.

    M_true, M_model: true and nominal matrices (same shape).
    M_models_boot: iterable of bootstrap matrices of the same shape.
    plot_type: 'violin' or 'box' for the distribution overlay.

    Returns (fig, axs).
    """
    d1, d2 = M_true.shape
    fig, axs = plt.subplots(nrows=d1, ncols=d2, figsize=(d2*3, d1*3))
    for i in range(d1):
        for j in range(d2):
            ax = get_entry(axs, i, j, d1, d2)
            # Data extraction
            x_true = M_true[i, j]
            x_model = M_model[i, j]
            x_models_boot = np.array([M_model_boot[i, j] for M_model_boot in M_models_boot])
            x_models_boot_mean = np.mean(x_models_boot)
            x_models_boot_median = np.median(x_models_boot)
            if show_dist:
                if plot_type == 'violin':
                    violinplot_parts = ax.violinplot(x_models_boot, positions=[0], showextrema=False, points=400)
                    for part in violinplot_parts['bodies']:
                        part.set_facecolor('k')
                elif plot_type == 'box':
                    ax.boxplot(x_models_boot, positions=[0], medianprops=dict(color='C2'))
                else:
                    raise ValueError
            # Short horizontal line segments mark the scalar reference values
            linex = [-0.1, 0.1]
            if show_true:
                ax.plot(linex, x_true*np.ones(2), c='C0', linestyle='--', lw=3, label='True')
            if show_model:
                ax.plot(linex, x_model*np.ones(2), c='C1', linestyle='-', lw=3, label='Nominal')
            if show_boot_mean:
                ax.plot(linex, x_models_boot_mean*np.ones(2), c='k', label='Bootstrap mean')
            if show_boot_median:
                ax.plot(linex, x_models_boot_median*np.ones(2), c='C2', label='Bootstrap median')
            if show_samples:
                ax.scatter(np.zeros_like(x_models_boot), x_models_boot, s=40, c='k', marker='o', alpha=0.2, label='Bootstrap samples')
            # Set the ylimit from the central (100 - 2p)% of the bootstrap
            # distribution, widened to always include the true value.
            # NOTE: this local `p` (a percentile) shadows the module-level
            # output dimension `p` used elsewhere in this file.
            p = 1  # percentile
            x_models_boot_pct_lwr = np.percentile(x_models_boot, p)
            x_models_boot_pct_upr = np.percentile(x_models_boot, 100-p)
            xpct_diff = x_models_boot_pct_upr - x_models_boot_pct_lwr
            xlim_lwr = np.min([x_models_boot_pct_lwr, x_true-0.1*xpct_diff])
            xlim_upr = np.max([x_models_boot_pct_upr, x_true+0.1*xpct_diff])
            ax.set_ylim([xlim_lwr, xlim_upr])
            if show_legend:
                ax.legend()
    fig.suptitle(title_str)
    fig.tight_layout()
    return fig, axs
if __name__ == "__main__":
    # Single-run experiment: generate a system, identify a model from one
    # batch of training data, estimate model uncertainty by bootstrap, design
    # compensators by three methods ('opt', 'ceq', 'rob'), and compare them.
    # Options
    plt.close('all')
    seed = 1
    npr.seed(seed)
    # Number of Monte Carlo samples
    Ns = 1
    # Number of bootstrap samples
    Nb = 100
    # Simulation time
    t = 20  # This is the amount of data that will be used by sysid
    T = t + 1  # This is the amount of data that will be simulated
    # Problem data
    system_kwargs = dict()
    # n, m, p, A, B, C, D, Y, Q, R, W, V, U = gen_system_omni('inverted_pendulum', **system_kwargs)
    # n, m, p, A, B, C, D, Y, Q, R, W, V, U = gen_system_omni('noninverted_pendulum', **system_kwargs)
    # system_kwargs = dict(n=4, m=2, p=2, spectral_radius=0.9, noise_scale=0.1, seed=1)
    # n, m, p, A, B, C, D, Y, Q, R, W, V, U = gen_system_omni('rand', **system_kwargs)
    n, m, p, A, B, C, D, Y, Q, R, W, V, U = gen_system_omni(system_idx=1)
    ss_true = make_ss(A, B, C, D, W, V, U)
    # Exploration control signal
    u_explore_var = 10*(np.max(la.eig(W)[0]) + np.max(la.eig(V)[0]))
    # u_explore_var = 10.0
    noise_pre_scale = 1.0
    noise_post_scale = 1.0
    bisection_epsilon = 0.01
    # Check controllability and observability of the true system
    check_tags = []
    if la.matrix_rank(ctrb(A, B)) < n:
        check_tags.append(create_tag("The pair (A, B) is NOT controllable.", message_type='warn'))
    if la.matrix_rank(obsv(Q, A)) < n:
        check_tags.append(create_tag("The pair (Q, A) is NOT observable.", message_type='warn'))
    if la.matrix_rank(obsv(A, C)) < n:
        check_tags.append(create_tag("The pair (A, C) is NOT observable.", message_type='warn'))
    if la.matrix_rank(ctrb(A - np.dot(U, la.solve(V, C)), B)) < n:
        check_tags.append(create_tag("The pair (A - U V^-1 C, B) is NOT controllable.", message_type='warn'))
    for tag in check_tags:
        print(tag)
    # Make training data (take the first Monte Carlo sample of each history)
    x_train_hist, u_train_hist, y_train_hist, w_hist, v_hist = make_offline_data(A, B, C, D, W, V, Ns, T, u_explore_var)
    x_train_hist = x_train_hist[0]
    u_train_hist = u_train_hist[0]
    y_train_hist = y_train_hist[0]
    w_hist = w_hist[0]
    v_hist = v_hist[0]
    t_train_hist = np.arange(T)
    # # Plot
    # fig, ax = plt.subplots(nrows=2)
    # ax[0].step(t_train_hist, u_train_hist)
    # ax[1].step(t_train_hist, y_train_hist)
    # fig.suptitle('training data')
    # ax[0].set_ylabel('input')
    # ax[1].set_ylabel('output')
    # Estimate the system model and residuals
    model, res = system_identification(y_train_hist[0:t], u_train_hist[0:t], id_method='N4SID',
                                       SS_fixed_order=n, return_residuals=True)
    # Split the residuals into state (w) and output (v) components
    w_est = res[0:n].T
    v_est = res[n:].T
    Ahat = model.A
    Bhat = model.B
    Chat = model.C
    Dhat = model.D
    What = model.Q
    Vhat = model.R
    Uhat = model.S
    ss_model = make_ss(Ahat, Bhat, Chat, Dhat, What, Vhat, Uhat)
    ####################################################################################################################
    # # change basis for A, B, C of true system by putting it into e.g. modal form
    # # That way, the estimated parameters will approach the true parameters after using the same standardizing transform
    # # needs more investigation, control.canonical_form with form='modal' does not make them match at all
    # ss_true_modal, true_modal_transform = control.canonical_form(ss_true, form='modal')
    # ss_model_modal, model_modal_transform = control.canonical_form(ss_model, form='modal')
    # ss_true_reachable, true_reachable_transform = control.canonical_form(ss_true, form='reachable')
    # ss_model_reachable, model_reachable_transform = control.canonical_form(ss_model, form='reachable')
    ####################################################################################################################
    ####################################################################################################################
    # This code chunk is for debug/test only
    # Cannot use this practically since ss_true is not accessible
    ss_model_trans, P = ss_change_coordinates(ss_true, model, method='match')
    # With lots of data, ss_model_trans should match ss_true very closely in A, B, C, D
    # but Q, R, S cannot be identified uniquely - see subspace ID book pg 66
    print('True noise covariances')
    print(ss_true.Q)
    print(ss_true.R)
    print(ss_true.S)
    print('')
    print('Estimated noise covariances')
    print(ss_model_trans.Q)
    print(ss_model_trans.R)
    print(ss_model_trans.S)
    print('')
    # # Check that model transformation has no impact on the performance of the ce compensator
    # compensator = make_compensator(ss_model, None, Y, R)[0]
    # print(compensator)
    # print(compute_performance(A, B, C, Q, R, W, V, compensator.F, compensator.K, compensator.L))
    #
    # compensator = make_compensator(ss_model_trans, None, Y, R)[0]
    # print(compensator)
    # print(compute_performance(A, B, C, Q, R, W, V, compensator.F, compensator.K, compensator.L))
    ####################################################################################################################
    # DEBUG ONLY
    # CHEAT by matching model coordinates with the true
    # ss_model = ss_model_trans
    # # CHEAT by using true noise covariance, appropriately transformed
    # ss_model.Q = ss_true.Q
    # ss_model.R = ss_true.R
    # ss_model.S = ss_true.S
    ####################################################################################################################
    # Get the results for each control synthesis method
    methods = ['opt', 'ceq', 'rob']
    result_dict = {method: get_result(method) for method in methods}
    # Report each method's cost relative to the optimal design
    for method in methods:
        print("%s cost = %.6f" % (method, result_dict[method].performance.ihc/result_dict['opt'].performance.ihc))
    # print(result_dict['rob'].design_info)
    # print(result_dict['rob'].uncertainty.a)
    # print(result_dict['rob'].uncertainty.b)
    # print(result_dict['rob'].uncertainty.c)
    # print(result_dict['rob'].ss_models_boot)
    ####################################################################################################################
    ####################################################################################################################
    ss_models_boot = result_dict['rob'].ss_models_boot
    comparison_response_plot(ss_true, ss_model, ss_models_boot, t_sim=10)
    # comparison_closed_loop_plot(result_dict, disturb_method='wgn', disturb_scale=0.1, t_sim=20)
    ####################################################################################################################
    # ####################################################################################################################
    # # Redesign from result data
    # result = result_dict['rob']
    # my_model = result.model
    # uncertainty = result.uncertainty
    # compensator, noise_scale, tag_str_list_cg = make_compensator(my_model, uncertainty, Y, R,
    #                                                              noise_pre_scale=1.0,
    #                                                              noise_post_scale=0.5,
    #                                                              bisection_epsilon=bisection_epsilon)
    # F, K, L = compensator.F, compensator.K, compensator.L
    # performance = compute_performance(A, B, C, Q, R, W, V, F, K, L)
    # for tag in tag_str_list_cg:
    #     print(tag)
    # print(performance.ihc/result_dict['opt'].performance.ihc)
    # ####################################################################################################################
    ####################################################################################################################
    # Bootstrap model state space matrices vs true system
    # Match all systems coordinates with the true system
    ss_models_boot_trans = [ss_change_coordinates(ss_true, ss_model_boot, method='match')[0] for ss_model_boot in ss_models_boot]
    matrix_distplot(ss_true.A, ss_model_trans.A, [ss_model_boot_trans.A for ss_model_boot_trans in ss_models_boot_trans], title_str='A')
    matrix_distplot(ss_true.B, ss_model_trans.B, [ss_model_boot_trans.B for ss_model_boot_trans in ss_models_boot_trans], title_str='B')
    matrix_distplot(ss_true.C, ss_model_trans.C, [ss_model_boot_trans.C for ss_model_boot_trans in ss_models_boot_trans], title_str='C')
    ####################################################################################################################
| 17,018 | 40.408759 | 140 |
py
|
robust-adaptive-control-multinoise-output
|
robust-adaptive-control-multinoise-output-main/rocoboom_out/common/monte_carlo.py
|
"""
Robust adaptive control from output measurements via multiplicative noise from bootstrapped uncertainty estimates.
"""
import argparse
from time import time
import os
import multiprocessing as mp
import numpy as np
import numpy.linalg as la
import numpy.random as npr
import matplotlib.pyplot as plt
from utility.matrixmath import mdot, specrad, minsv, lstsqb, dlyap, dare, dare_gain
from utility.pickle_io import pickle_export
from utility.path_utility import create_directory
from utility.user_input import yes_or_no
from utility.printing import printcolors, create_tag
from rocoboom_out.common.problem_data_gen import gen_system_omni, save_system
from rocoboom_out.common.ss_tools import make_ss, ss_change_coordinates
from rocoboom_out.common.sim import make_offline_data
from rocoboom_out.common.sysid import system_identification
from rocoboom_out.common.uncertainty import estimate_model_uncertainty
from rocoboom_out.common.compensator_design import make_compensator, sysmat_cl
from rocoboom_out.common.compensator_eval import compute_performance
from rocoboom_out.common.plotting import multi_plot_paper
# from rocoboom_out.common.plotting import multi_plot
def monte_carlo_sample(control_scheme, uncertainty_estimator, required_args,
                       x_train_hist, u_train_hist, y_train_hist, w_hist, v_hist,
                       monte_carlo_idx, print_diagnostics=False, log_diagnostics=True):
    """
    Run one Monte Carlo trial of adaptive output-feedback control.

    At each time in required_args['t_evals'] at or past t_start_estimate, a
    state-space model is identified from the training data observed so far
    (N4SID via SIPPY), a compensator is designed (with multiplicative-noise
    robustness when control_scheme == 'robust'), and the resulting closed-loop
    spectral radius and infinite-horizon cost on the true system are recorded.

    :param control_scheme: 'certainty_equivalent' or 'robust'.
    :param uncertainty_estimator: method string forwarded to
        estimate_model_uncertainty (used only for the 'robust' scheme).
    :param required_args: dict of problem data and algorithm settings (see mainfun).
    :param x_train_hist: training state trajectory for this trial.
    :param u_train_hist: training input trajectory for this trial.
    :param y_train_hist: training output trajectory for this trial.
    :param w_hist: process noise history (unused here; kept for call-site compatibility).
    :param v_hist: measurement noise history (unused here; kept for call-site compatibility).
    :param monte_carlo_idx: index of this trial, echoed back for async result collection.
    :param print_diagnostics: if True, print per-timestep diagnostic messages.
    :param log_diagnostics: if True, accumulate diagnostics into the returned log string.
    :return: dict of history arrays and diagnostics for this trial.
    """
    log_str = ''
    if log_diagnostics:
        code_start_time = time()
        log_str += 'Monte Carlo sample %d \n' % (monte_carlo_idx+1)

    # Unpack arguments from dictionary
    n = required_args['n']
    m = required_args['m']
    p = required_args['p']
    A = required_args['A']
    B = required_args['B']
    C = required_args['C']
    D = required_args['D']
    Y = required_args['Y']
    Q = required_args['Q']
    R = required_args['R']
    W = required_args['W']
    V = required_args['V']
    U = required_args['U']
    Ns = required_args['Ns']
    Nb = required_args['Nb']
    T = required_args['T']
    x0 = required_args['x0']
    bisection_epsilon = required_args['bisection_epsilon']
    t_evals = required_args['t_evals']
    t_start_estimate = required_args['t_start_estimate']
    t_explore = required_args['t_explore']
    t_cost_fh = required_args['t_cost_fh']
    cost_horizon = required_args['cost_horizon']
    Kare_true = required_args['Kopt']
    Lare_true = required_args['Lopt']
    u_explore_var = required_args['u_explore_var']
    u_exploit_var = required_args['u_exploit_var']
    noise_pre_scale = required_args['noise_pre_scale']
    noise_post_scale = required_args['noise_post_scale']

    ss_true = make_ss(A, B, C, D, W, V, U)

    # Certainty-equivalent control ignores model uncertainty entirely
    # (zero post-scaling turns off the multiplicative noise in the design).
    if control_scheme == 'certainty_equivalent':
        noise_post_scale = 0

    # Preallocate history arrays
    # Gain
    F_hist = np.zeros([T, n, n])
    K_hist = np.zeros([T, m, n])
    L_hist = np.zeros([T, n, p])

    # Nominal model (np.inf marks "not evaluated at this time")
    Ahat_hist = np.full([T, n, n], np.inf)
    Bhat_hist = np.full([T, n, m], np.inf)
    Chat_hist = np.full([T, p, n], np.inf)
    What_hist = np.full([T, n, n], np.inf)
    Vhat_hist = np.full([T, p, p], np.inf)
    Uhat_hist = np.full([T, n, p], np.inf)

    # Model uncertainty (multiplicative noise variances and directions)
    a_hist = np.full([T, n*n], np.inf)
    b_hist = np.full([T, n*m], np.inf)
    c_hist = np.full([T, p*n], np.inf)
    Aahist = np.full([T, n*n, n, n], np.inf)
    Bbhist = np.full([T, n*m, n, m], np.inf)
    Cchist = np.full([T, p*n, p, n], np.inf)
    gamma_reduction_hist = np.ones(T)

    # Spectral radius
    specrad_hist = np.full(T, np.inf)

    # Cost
    cost_future_hist = np.full(T, np.inf)
    cost_adaptive_hist = np.full(T, np.inf)
    cost_optimal_hist = np.full(T, np.inf)

    # Model error
    Aerr_hist = np.full(T, np.inf)
    Berr_hist = np.full(T, np.inf)
    Cerr_hist = np.full(T, np.inf)

    # Loop over time
    for t in range(T):
        if log_diagnostics:
            tag_list = []

        # Only use the training data we have observed up until now (do not cheat by looking ahead into the full history)
        # Only perform computations at the times of interest t_evals
        if t not in t_evals:
            cost_future_hist[t] = -1  # sentinel: this timestep was skipped
            continue

        if t < t_start_estimate:
            # Exploring
            u_str = "Explore"
            stable_str = printcolors.LightGray + 'Stable N/A' + printcolors.Default
        else:
            # Exploit estimated model
            u_str = "Exploit"

            # Start generating model and uncertainty estimates once there is enough data to get non-degenerate estimates
            # Estimate state space model from input-output data via subspace ID using the SIPPY package
            model, res = system_identification(y_train_hist[0:t], u_train_hist[0:t], id_method='N4SID',
                                               SS_fixed_order=n, return_residuals=True)
            # Split the residuals into process (w) and measurement (v) noise estimates.
            w_est = res[0:n].T
            v_est = res[n:].T

            # Transform model to resemble true system coordinates, for evaluation only, not used by algo
            ss_model_trans, P = ss_change_coordinates(ss_true, model, method='match')

            # Record model estimates and errors
            Ahat = ss_model_trans.A
            Bhat = ss_model_trans.B
            Chat = ss_model_trans.C
            What = ss_model_trans.Q
            Vhat = ss_model_trans.R
            Uhat = ss_model_trans.S

            Ahat_hist[t] = Ahat
            Bhat_hist[t] = Bhat
            Chat_hist[t] = Chat
            What_hist[t] = What
            Vhat_hist[t] = Vhat
            Uhat_hist[t] = Uhat

            Aerr_hist[t] = la.norm(Ahat - A, ord='fro')
            Berr_hist[t] = la.norm(Bhat - B, ord='fro')
            Cerr_hist[t] = la.norm(Chat - C, ord='fro')

            # Estimate model uncertainty (robust scheme only)
            if control_scheme == 'robust':
                uncertainty_dict = estimate_model_uncertainty(model, u_train_hist, y_train_hist, w_est, v_est, t, Nb,
                                                              uncertainty_estimator)
                uncertainty = uncertainty_dict['uncertainty']

                # Record multiplicative noise history
                a_hist[t] = uncertainty.a
                b_hist[t] = uncertainty.b
                c_hist[t] = uncertainty.c
                Aahist[t] = uncertainty.Aa
                Bbhist[t] = uncertainty.Bb
                Cchist[t] = uncertainty.Cc
            else:
                uncertainty = None

            compensator, gamma_reduction, tag_list_cg = make_compensator(model, uncertainty, Y, R,
                                                                         noise_pre_scale, noise_post_scale,
                                                                         bisection_epsilon, log_diagnostics)
            F, K, L = compensator.F, compensator.K, compensator.L
            gamma_reduction_hist[t] = gamma_reduction
            if log_diagnostics:
                tag_list += tag_list_cg

            # Evaluate spectral radius of true closed-loop system with current compensator
            performance = compute_performance(A, B, C, Q, R, W, V, F, K, L)
            specrad_hist[t], cost_future_hist[t] = performance.sr, performance.ihc

            if log_diagnostics:
                if specrad_hist[t] > 1:
                    stable_str = printcolors.Red + 'Unstable' + printcolors.Default
                else:
                    stable_str = printcolors.Green + 'Stable' + printcolors.Default

            # Record compensator
            F_hist[t] = F
            K_hist[t] = K
            L_hist[t] = L

        # Print and log diagnostic messages
        if log_diagnostics:
            time_whole_str = ''
            time_header_str = "Time = %4d %s. %s." % (t, u_str, stable_str)
            time_whole_str += time_header_str + '\n'
            for tag in tag_list:
                time_whole_str += tag + '\n'
            log_str += time_whole_str
            if print_diagnostics:
                print(time_whole_str)

    if log_diagnostics:
        code_end_time = time()
        code_elapsed_time = code_end_time - code_start_time
        time_elapsed_str = '%12.6f' % code_elapsed_time
        log_str += "Completed Monte Carlo sample %6d / %6d in %s seconds\n" % (monte_carlo_idx+1, Ns, time_elapsed_str)
    else:
        time_elapsed_str = '?'

    return {'cost_adaptive_hist': cost_adaptive_hist,
            'cost_optimal_hist': cost_optimal_hist,
            'cost_future_hist': cost_future_hist,
            'specrad_hist': specrad_hist,
            'Ahat_hist': Ahat_hist,
            'Bhat_hist': Bhat_hist,
            'Chat_hist': Chat_hist,
            'What_hist': What_hist,
            'Vhat_hist': Vhat_hist,
            'Uhat_hist': Uhat_hist,
            'Aerr_hist': Aerr_hist,
            'Berr_hist': Berr_hist,
            'Cerr_hist': Cerr_hist,
            'a_hist': a_hist,
            'b_hist': b_hist,
            'c_hist': c_hist,
            'Aahist': Aahist,
            'Bbhist': Bbhist,
            'Cchist': Cchist,
            'gamma_reduction_hist': gamma_reduction_hist,
            'x_train_hist': x_train_hist,
            'u_train_hist': u_train_hist,
            # BUGFIX: this key previously mapped to u_train_hist (copy-paste
            # error), silently recording inputs as outputs.
            'y_train_hist': y_train_hist,
            'F_hist': F_hist,
            'K_hist': K_hist,
            'L_hist': L_hist,
            'monte_carlo_idx': np.array(monte_carlo_idx),
            'log_str': log_str,
            'time_elapsed_str': time_elapsed_str}
def monte_carlo_group(control_scheme, uncertainty_estimator, required_args,
                      conditional_args, w_hist, v_hist, parallel_option='serial'):
    """
    Run Ns Monte Carlo trials for one control scheme and stack the results.

    :param control_scheme: 'certainty_equivalent' or 'robust'; forwarded to monte_carlo_sample.
    :param uncertainty_estimator: uncertainty estimation method string.
    :param required_args: dict of problem data and algorithm settings (see mainfun).
    :param conditional_args: dict with the per-trial training histories
        'x_train_hist', 'u_train_hist', 'y_train_hist' (each indexed by trial).
    :param w_hist: per-trial process noise histories.
    :param v_hist: per-trial measurement noise histories.
    :param parallel_option: 'serial' for single-threaded execution or 'parallel'
        for asynchronous multiprocessing across CPU cores.
    :return: dict mapping each recorded field to an array stacked over trials
        (lists for the string fields 'log_str' and 'time_elapsed_str').
    """
    print("Evaluating control scheme: "+control_scheme)

    # Unpack arguments from dictionaries
    n = required_args['n']
    m = required_args['m']
    p = required_args['p']
    Ns = required_args['Ns']
    T = required_args['T']

    # History arrays
    x_train_hist = conditional_args['x_train_hist']
    u_train_hist = conditional_args['u_train_hist']
    y_train_hist = conditional_args['y_train_hist']

    # Shapes of the per-trial fields that are kept; fields commented out in the
    # original are deliberately dropped to reduce storage space.
    shape_dict = {
        'a_hist': [T, n*n],
        'b_hist': [T, n*m],
        'c_hist': [T, p*n],
        'gamma_reduction_hist': [T],
        'specrad_hist': [T],
        'cost_future_hist': [T],
        'Aerr_hist': [T],
        'Berr_hist': [T],
        'Cerr_hist': [T],
        'monte_carlo_idx': [1],
        'log_str': None,         # string fields are collected in plain lists
        'time_elapsed_str': None}

    fields = shape_dict.keys()
    output_dict = {}
    for field in fields:
        if field == 'log_str' or field == 'time_elapsed_str':
            output_dict[field] = [None]*Ns
        else:
            output_field_shape = [Ns] + shape_dict[field]
            output_dict[field] = np.zeros(output_field_shape)

    def collect_result(sample_dict):
        # Callback used both serially and by apply_async: copy the kept fields
        # of one finished trial into the stacked output arrays.
        k = sample_dict['monte_carlo_idx']
        time_elapsed_str = sample_dict['time_elapsed_str']
        for field in fields:
            output_dict[field][k] = sample_dict[field]
        print("Completed Monte Carlo sample %6d / %6d in %s seconds" % (k+1, Ns, time_elapsed_str))

    if parallel_option == 'parallel':
        # Start the parallel process pool.
        # BUGFIX: clamp to at least 1 worker -- on a single-core machine
        # mp.cpu_count() - 1 == 0 and mp.Pool(0) raises ValueError.
        num_cpus_to_use = max(1, mp.cpu_count() - 1)  # leave 1 cpu open for other tasks, ymmv
        pool = mp.Pool(num_cpus_to_use)

    # Simulate each Monte Carlo trial
    for k in range(Ns):
        sample_args = (control_scheme, uncertainty_estimator, required_args,
                       x_train_hist[k], u_train_hist[k], y_train_hist[k], w_hist[k], v_hist[k], k)
        if parallel_option == 'serial':
            # Serial single-threaded processing
            sample_dict = monte_carlo_sample(*sample_args)
            collect_result(sample_dict)
        elif parallel_option == 'parallel':
            # Asynchronous parallel CPU processing
            pool.apply_async(monte_carlo_sample, args=sample_args, callback=collect_result)

    if parallel_option == 'parallel':
        # Close and join the parallel process pool
        pool.close()
        pool.join()

    print('')
    return output_dict
# TODO unused for now
def compute_derived_data(output_dict, receding_horizon=5):
    """
    Placeholder for computing derived cost quantities from the raw results.

    Intended derived data: receding-horizon cost averages, accumulated costs,
    and regret / regret-ratio histories per control scheme. These can be
    computed and stored for faster loading/plotting, or computed after loading
    to reduce data storage requirements. When implemented, output_dict will be
    modified/mutated in place.

    :param output_dict: results dict keyed by control scheme.
    :param receding_horizon: window length for receding-horizon averages.
    :return: None (currently a no-op).
    """
    pass
def mainfun(uncertainty_estimator, Ns, Nb, T, t_evals, noise_pre_scale, noise_post_scale,
            cost_horizon, horizon_method, t_cost_fh, system_idx, system_kwargs, seed, parallel_option):
    """
    Set up the experiment, run all Monte Carlo groups, and export results.

    Generates the system, computes the true-optimal LQG compensator as a
    baseline, creates offline training data, evaluates each control scheme
    ('certainty_equivalent' and 'robust') over Ns trials, and pickles both the
    options and the results into a timestamped experiment directory.

    :return: (output_dict, cost_are_true, t_hist, t_start_estimate), or None if
        the user declines one of the interactive confirmation prompts.
    """
    # Set up output directory (timestamp with '.' replaced so it is path-safe)
    timestr = str(time()).replace('.', 'p')
    dirname_out = timestr+'_Ns_'+str(Ns)+'_T_'+str(T)+'_system_'+str(system_idx)+'_seed_'+str(seed)
    dirname_out = os.path.join('..', 'experiments', dirname_out)
    create_directory(dirname_out)

    # Seed the random number generator
    npr.seed(seed)

    # Problem data
    n, m, p, A, B, C, D, Y, Q, R, W, V, U = gen_system_omni(system_idx, **system_kwargs)
    filename_out = 'system_dict.pickle'
    save_system(n, m, p, A, B, C, D, Y, Q, R, W, V, U, dirname_out, filename_out)
    model_true = make_ss(A, B, C, D)

    # Catch numerical error-prone case when system is open-loop unstable and not using control during training
    if specrad(A) > 1:
        response = yes_or_no("System is open-loop unstable, offline trajectories may cause numerical issues. Continue?")
        if not response:
            return

    # Initial state
    x0 = np.zeros(n)

    # Compare with the true optimal gains given perfect information of the system
    # IMPORTANT: cannot compare gains directly because the internal state representation is different
    # Only compare closed-loop transfer functions or closed-loop performance cost
    Popt, Kopt = dare_gain(A, B, Q, R)
    # Estimator gain via the dual Riccati equation; sign flip converts the dual
    # regulator gain into the observer-gain convention used downstream.
    Sopt, Lopt = dare_gain(A.T, C.T, W, V, E=None, S=U)
    Lopt = -Lopt.T
    Fopt = sysmat_cl(A, B, C, Kopt, Lopt)
    performance_true = compute_performance(A, B, C, Q, R, W, V, Fopt, Kopt, Lopt)
    specrad_true, cost_are_true = performance_true.sr, performance_true.ihc

    # Time history
    t_hist = np.arange(T)

    # Time to begin forming model estimates
    t_start_estimate_lwr = int(n*(n+m+p)/p)
    t_start_estimate = 2*t_start_estimate_lwr
    # NOTE(review): since t_start_estimate = 2*t_start_estimate_lwr with
    # t_start_estimate_lwr >= 0, this check can never trigger -- it only
    # matters if t_start_estimate is later set manually. Confirm intent.
    if t_start_estimate < t_start_estimate_lwr:
        response = yes_or_no("t_start_estimate chosen < int(n*(n+m+p)/p). Continue?")
        if not response:
            return

    # TODO remove this if unused
    # Time to switch from exploration to exploitation
    t_explore = t_start_estimate+1
    if t_explore < t_start_estimate_lwr+1:
        response = yes_or_no("t_explore chosen < int(n*(n+m+p)/p) + 1. Continue?")
        if not response:
            return

    # We made this choice explicit in the paper!
    # practically we do not have knowledge of W so this is not exactly fair practically,
    # but is useful for testing since it ensures the signal-to-noise ratio is high enough
    # for good sysID w/o taking a lot of data samples
    # Input exploration noise during explore and exploit phases
    u_explore_var = np.max(np.abs(la.eig(W)[0])) + np.max(np.abs(la.eig(V)[0]))
    u_exploit_var = np.max(np.abs(la.eig(W)[0])) + np.max(np.abs(la.eig(V)[0]))

    # Bisection tolerance
    bisection_epsilon = 0.01

    # Export the simulation options for later reference
    sim_options = {'uncertainty_estimator': uncertainty_estimator,
                   'Ns': Ns,
                   'Nb': Nb,
                   'T': T,
                   'system_idx': system_idx,
                   'seed': seed,
                   'bisection_epsilon': bisection_epsilon,
                   't_start_estimate': t_start_estimate,
                   't_explore': t_explore,
                   'u_explore_var': u_explore_var,
                   'u_exploit_var': u_exploit_var}
    filename_out = 'sim_options.pickle'
    pickle_export(dirname_out, filename_out, sim_options)

    # control_schemes = ['certainty_equivalent']
    control_schemes = ['certainty_equivalent', 'robust']
    output_dict = {}

    # Generate sample trajectory data (pure exploration)
    x_train_hist, u_train_hist, y_train_hist, w_hist, v_hist = make_offline_data(A, B, C, D, W, V, Ns, T, u_explore_var, x0, verbose=True)

    # Evaluate control schemes
    required_args = {'n': n,
                     'm': m,
                     'p': p,
                     'A': A,
                     'B': B,
                     'C': C,
                     'D': D,
                     'Y': Y,
                     'Q': Q,
                     'R': R,
                     'W': W,
                     'V': V,
                     'U': U,
                     'Ns': Ns,
                     'Nb': Nb,
                     'T': T,
                     'x0': x0,
                     'bisection_epsilon': bisection_epsilon,
                     't_evals': t_evals,
                     't_start_estimate': t_start_estimate,
                     't_explore': t_explore,
                     't_cost_fh': t_cost_fh,
                     'cost_horizon': cost_horizon,
                     'Kopt': Kopt,
                     'Lopt': Lopt,
                     'u_explore_var': u_explore_var,
                     'u_exploit_var': u_exploit_var,
                     'noise_pre_scale': noise_pre_scale,
                     'noise_post_scale': noise_post_scale}

    conditional_args = {'x_train_hist': x_train_hist,
                        'u_train_hist': u_train_hist,
                        'y_train_hist': y_train_hist}

    for control_scheme in control_schemes:
        output_dict[control_scheme] = monte_carlo_group(control_scheme=control_scheme,
                                                        uncertainty_estimator=uncertainty_estimator,
                                                        required_args=required_args,
                                                        conditional_args=conditional_args,
                                                        w_hist=w_hist,
                                                        v_hist=v_hist,
                                                        parallel_option=parallel_option)
    compute_derived_data(output_dict)

    # Export relevant data
    # filename_out = training_type+'_training_'+testing_type+'_testing_'+'comparison_results'+'.pickle'
    filename_out = 'comparison_results' + '.pickle'
    data_out = [output_dict, cost_are_true, t_hist, t_start_estimate, t_evals]
    pickle_export(dirname_out, filename_out, data_out)

    return output_dict, cost_are_true, t_hist, t_start_estimate
if __name__ == "__main__":
    # Entry point: configure the experiment from CLI arguments plus the
    # hard-coded defaults below, run all Monte Carlo groups, then plot.

    # Create the parser and add arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=0,
                        help="Global seed for the Monte Carlo batch (default=0).")
    parser.add_argument('--num_trials', type=int, default=100,
                        help="Number of trials in the Monte Carlo batch (default=100).")

    # Parse
    args = parser.parse_args()
    seed = args.seed

    # Choose the uncertainty estimation scheme
    # uncertainty_estimator = 'exact'
    # uncertainty_estimator = 'sample_transition_bootstrap'
    uncertainty_estimator = 'semiparametric_bootstrap'

    # Number of Monte Carlo samples
    # Ns = 100000  # Used in paper
    # Ns = 10000
    # Ns = 1000
    # Ns = 100
    # Ns = 10
    # Ns = 2
    Ns = args.num_trials

    # Number of bootstrap samples
    Nb = 100  # Used in paper
    # Nb = 50
    # Nb = 20

    # Simulation time
    t_evals = np.array([20, 40, 80, 160, 320])  # Used in paper
    # t_evals = np.arange(20, 320, step=10)
    # t_evals = np.arange(200, 300+1, 50)
    # t_evals = np.arange(200, 600+1, 50)
    # Horizon must cover the last evaluation time
    T = np.max(t_evals)+1

    # Choose noise_pre_scale (AKA gamma), the pre-limit multiplicative noise scaling parameter, should be >= 1
    # "How much mult noise do you want?"
    noise_pre_scale = 1.0
    # noise_pre_scale = 0.001

    # Choose the post-limit multiplicative noise scaling parameter, must be between 0 and 1
    # "How much of the max possible mult noise do you want?"
    noise_post_scale = 1.0
    # noise_post_scale = 1 / noise_pre_scale

    # Choose cost horizon
    cost_horizon = 'infinite'
    horizon_method = None
    t_cost_fh = None

    # Random number generator seed
    # seed = npr.randint(1000)

    # System to choose
    # system_idx = 'inverted_pendulum'
    # system_idx = 'scalar'
    # system_idx = 'rand'
    system_idx = 1

    if system_idx == 'scalar':
        system_kwargs = dict(A=1, B=1, Q=1, R=0, W=1, V=0.1)
    elif system_idx == 'rand':
        system_kwargs = dict(n=4, m=2, p=2, spectral_radius=0.9, noise_scale=0.00001, seed=1)
        # system_kwargs = dict(n=2, m=1, p=1, spectral_radius=0.9, noise_scale=0.0001, seed=1)
    else:
        system_kwargs = dict()

    # Parallel computation option
    # parallel_option = 'serial'
    parallel_option = 'parallel'

    # Run main
    output_dict, cost_are_true, t_hist, t_start_estimate = mainfun(uncertainty_estimator, Ns, Nb, T, t_evals, noise_pre_scale, noise_post_scale,
                                                                   cost_horizon, horizon_method, t_cost_fh, system_idx, system_kwargs, seed, parallel_option)

    # Plotting
    plt.close('all')
    multi_plot_paper(output_dict, cost_are_true, t_hist, t_start_estimate, t_evals)
| 28,139 | 39.257511 | 157 |
py
|
Colless
|
Colless-master/python/colless.py
|
from functools import lru_cache
from biotrees.util import unique
from biotrees.shape import Shape
from biotrees.shape.generator import comb
def binary(n):
    """
    Returns the binary representation of an `int` n in `String`.
    :param n: `int` instance.
    :return: `String` instance.
    """
    return format(n, "b")
def maxpower2(n):
    """
    Returns the maximum power of 2 that divides an `int` n, i.e. the number of
    trailing zero bits of n.

    Uses the O(1) bit trick `n & -n` (isolates the lowest set bit) instead of
    the original int -> binary-string round trip, which also removes the
    dependency on the sibling helpers `binary` and `last1`.

    :param n: `int` instance; must be positive.
    :return: `int` instance.
    :raises ValueError: if n is not positive (the original raised ValueError
        for n == 0 via an empty `max()`; non-positive input is meaningless here).
    """
    if n <= 0:
        raise ValueError("n must be a positive integer")
    # (n & -n) is 2**k where k is the number of trailing zeros of n.
    return (n & -n).bit_length() - 1
def last1(bn):
    """
    Returns the maximum power of 2 that divides an `int` n, given in binary `String` format.
    :param n: `String` instance.
    :return: `int` instance.
    """
    # Number of characters after the last '1' == number of trailing zeros.
    return len(bn) - 1 - bn.rindex('1')
def binary_colless_index(tree):
    """
    Returns the `int` value of the Colless index for a given `Shape` instance, tree.

    The Colless index is the sum over internal nodes of the absolute difference
    between the numbers of leaves in the two child subtrees.

    :param tree: `Shape` instance (binary tree).
    :return: `int` instance.
    """
    def walk(node):
        # Returns (colless index of subtree, number of leaves in subtree).
        if node.is_leaf():
            return 0, 1
        child1, child2 = node.children
        ci1, leaves1 = walk(child1)
        ci2, leaves2 = walk(child2)
        return ci1 + ci2 + abs(leaves1 - leaves2), leaves1 + leaves2

    index, _ = walk(tree)
    return index
@lru_cache(maxsize=None)
def mincolless(n):
    """
    Returns all the `Shape` instances that attain the minimum Colless index with `int` n leaves.

    Base cases: no trees for n == 0, the single leaf for n == 1, the cherry for
    n == 2. Otherwise, combine minimal subtrees according to the admissible
    root splits given by mincolless_root. Results are memoized via lru_cache.

    :param n: `int` instance.
    :return: `list` instance.
    """
    if n == 0:
        return []
    if n == 1:
        return [Shape.LEAF]
    if n == 2:
        return [Shape.CHERRY]

    trees = []
    for nl, nr in mincolless_root(n):
        combined = unique([Shape(sorted([left, right]))
                           for left in mincolless(nl)
                           for right in mincolless(nr)])
        trees = trees + combined
    return trees
@lru_cache(maxsize=None)
def mincolless_root(n):
    """
    Returns all the possible distributions of the number of children in the root so that
    a tree with `int` n leaves can attain the minimum Colless index.
    :param n: `int` instance.
    :return: `list` instance of (int, int) leaf-count pairs for the two root subtrees.
    """
    ns = []
    # A perfectly balanced split is always admissible for even n.
    if n % 2 == 0:
        ns.append((n // 2, n // 2))
    # Factor n = 2**k * oddn and work with the binary expansion of the odd part.
    k = maxpower2(n)
    oddn = n // 2 ** k
    bn = binary(oddn)
    # ls = exponents of the set bits of oddn, in decreasing order.
    ls = list(map(lambda x : len(bn) - 1 - x, filter(lambda i : bn[i] == '1', range(0, len(bn)))))
    if oddn == 3:  # this case must be treated apart
        ns.append((2**k, 2**(k+1)))
    elif len(ls) > 1:
        # NOTE(review): the two loops below enumerate the remaining admissible
        # unbalanced splits from gaps in the binary expansion of oddn; the
        # derivation follows the paper's combinatorial analysis -- verify
        # against the reference before modifying.
        for i in range(1, len(ls)-1):
            if ls[i+1] < ls[i] - 1 or i == len(ls) - 2:
                l = ls[i]
                t = sum(2**ls[j] for j in range(i+1, len(ls)-1)) // 2
                p = (oddn - 2 ** l - 2 * t - 1) // 2 ** (l + 1)
                ns.append((2**k*(2**l*p + 2*t + 1), 2**(k + l)*(p + 1)))

        for i in range(1, len(ls)):
            if ls[i-1] > ls[i] + 1:
                l = ls[i] + 1
                p = (oddn - sum(2 ** ls[j] for j in range(len(ls) - 1, i, -1))) // 2 ** (l + 1)
                t = (2 ** (l+1) * p + 2 ** l - 1 - oddn) // 2
                ns.append((2**(k + l)*p, 2**k*(2**l*(p + 1) - 2*t - 1)))

    return ns
def maxcolless(n):
    """
    Returns all the `Shape` instances that attain the maximum Colless index with `int` n leaves.

    The maximally unbalanced shape is the caterpillar (comb) tree, so the
    result is always a singleton list.

    :param n: `int` instance.
    :return: `list` instance.
    """
    caterpillar = comb(n)
    return [caterpillar]
| 3,334 | 25.259843 | 98 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/config/parse_args.py
|
import configargparse
def str2bool(v):
    """
    Parse a boolean-ish command-line value; from https://stackoverflow.com/a/43357954/1361529

    Accepts actual bools unchanged, common truthy/falsy strings
    (case-insensitive), and raises for anything else.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise configargparse.ArgumentTypeError('Boolean value expected.')
def parse_args():
    """
    Build the configargparse parser and parse config-file + CLI arguments.

    A config file is required via -c/--config; every option below can be set
    either in the config file or on the command line.

    :return: parsed argument namespace.
    """
    parser = configargparse.ArgParser()
    parser.add('-c', '--config', required=True, is_config_file=True, help='Config file path')
    parser.add("--name", type=str, default="main")
    # action="append" lets these be specified multiple times (multiple paths / values)
    parser.add("--train_data_path", action="append")
    parser.add("--val_data_path", action="append")
    parser.add("--test_data_path", action="append")
    parser.add("--model_save_path", required=True)
    parser.add("--pose_representation", type=str, default='3d_vec')
    parser.add("--mean_dir_vec", action="append", type=float, nargs='*')
    parser.add("--mean_pose", action="append", type=float, nargs='*')
    parser.add("--random_seed", type=int, default=-1)
    parser.add("--save_result_video", type=str2bool, default=True)

    # word embedding
    parser.add("--wordembed_path", type=str, default=None)
    parser.add("--wordembed_dim", type=int, default=100)
    parser.add("--freeze_wordembed", type=str2bool, default=False)

    # model
    parser.add("--model", type=str, required=True)
    parser.add("--epochs", type=int, default=10)
    parser.add("--batch_size", type=int, default=50)
    parser.add("--dropout_prob", type=float, default=0.3)
    parser.add("--n_layers", type=int, default=2)
    parser.add("--hidden_size", type=int, default=200)
    parser.add("--z_type", type=str, default='none')
    parser.add("--input_context", type=str, default='both')

    # dataset
    parser.add("--motion_resampling_framerate", type=int, default=24)
    parser.add("--n_poses", type=int, default=50)
    parser.add("--n_pre_poses", type=int, default=5)
    parser.add("--subdivision_stride", type=int, default=5)
    parser.add("--loader_workers", type=int, default=0)

    # GAN parameter
    parser.add("--GAN_noise_size", type=int, default=0)

    # training
    parser.add("--learning_rate", type=float, default=0.001)
    parser.add("--discriminator_lr_weight", type=float, default=0.2)
    parser.add("--loss_regression_weight", type=float, default=50)
    parser.add("--loss_gan_weight", type=float, default=1.0)
    parser.add("--loss_kld_weight", type=float, default=0.1)
    parser.add("--loss_reg_weight", type=float, default=0.01)
    parser.add("--loss_warmup", type=int, default=-1)

    # eval
    parser.add("--eval_net_path", type=str, default='')

    args = parser.parse_args()
    return args
| 2,717 | 38.391304 | 93 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/train_feature_extractor.py
|
import time
import sys
from data_loader.h36m_loader import Human36M
[sys.path.append(i) for i in ['.', '..']]
from torch import optim
import torch.nn.functional as F
import matplotlib
from model.embedding_net import EmbeddingNet
from train_eval.train_joint_embed import eval_embed
from utils.average_meter import AverageMeter
matplotlib.use('Agg') # we don't use interactive GUI
from config.parse_args import parse_args
from data_loader.lmdb_data_loader import *
import utils.train_utils
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def evaluate_testset(test_data_loader, generator):
    """
    Evaluate the embedding network on the whole test set.

    Temporarily switches the generator to eval mode, computes the mean
    reconstruction loss over all batches (no gradients), then restores
    training mode.

    :param test_data_loader: DataLoader yielding (poses, direction vectors).
    :param generator: embedding network being evaluated.
    :return: dict with the average validation loss under key 'loss'.
    """
    generator.train(False)  # switch to evaluation mode

    loss_meter = AverageMeter('loss')
    start_time = time.time()
    with torch.no_grad():
        for _, batch in enumerate(test_data_loader, 0):
            target_poses, target_vec = batch
            n_samples = target_vec.size(0)
            target = target_vec.to(device)
            loss, _ = eval_embed(None, None, None, target, generator)
            loss_meter.update(loss.item(), n_samples)

    generator.train(True)  # back to training mode

    elapsed_time = time.time() - start_time
    logging.info('[VAL] loss: {:.3f} / {:.1f}s'.format(loss_meter.avg, elapsed_time))
    return {'loss': loss_meter.avg}
def train_iter(args, epoch, target_data, net, optim):
    """
    Run one optimization step of the pose embedding network.

    Computes an L1 reconstruction loss (plus an L1 loss on frame-to-frame pose
    differences), optionally adds a KLD term with warm-up when variational
    encoding is enabled, backpropagates, and steps the optimizer.

    :param args: experiment config (unused here; kept for call-site compatibility).
    :param epoch: current epoch, used for KLD warm-up in the variational case.
    :param target_data: tensor of shape (batch, frames, dims) -- assumed from
        the dim=(1, 2) reductions below.
    :param net: embedding network; called as net(None, None, None, target, None, ...).
    :param optim: optimizer for net's parameters (note: name shadows the
        imported `torch.optim` module inside this function).
    :return: dict of scalar loss values ('loss', and 'KLD' when variational).
    """
    # zero gradients
    optim.zero_grad()

    variational_encoding = False  # AE (False) or VAE (True)
    use_pose_diff = True          # also penalize frame-to-frame pose differences

    # reconstruction loss
    context_feat, context_mu, context_logvar, poses_feat, pose_mu, pose_logvar, recon_data = \
        net(None, None, None, target_data, None, variational_encoding=variational_encoding)

    recon_loss = F.l1_loss(recon_data, target_data, reduction='none')
    recon_loss = torch.mean(recon_loss, dim=(1, 2))

    if use_pose_diff:
        target_diff = target_data[:, 1:] - target_data[:, :-1]
        recon_diff = recon_data[:, 1:] - recon_data[:, :-1]
        recon_loss += torch.mean(F.l1_loss(recon_diff, target_diff, reduction='none'), dim=(1, 2))

    recon_loss = torch.sum(recon_loss)

    # KLD (only meaningful for the variational autoencoder)
    if variational_encoding:
        if net.mode == 'speech':
            KLD = -0.5 * torch.sum(1 + context_logvar - context_mu.pow(2) - context_logvar.exp())
        else:
            KLD = -0.5 * torch.sum(1 + pose_logvar - pose_mu.pow(2) - pose_logvar.exp())

        # KLD warm-up: off for the first 10 epochs, then ramp to 1.0
        if epoch < 10:
            KLD_weight = 0
        else:
            KLD_weight = min(1.0, (epoch - 10) * 0.05)
        recon_weight = 100
        loss = recon_weight * recon_loss + KLD_weight * KLD
    else:
        recon_weight = 1
        loss = recon_weight * recon_loss

    loss.backward()
    optim.step()

    ret_dict = {'loss': recon_weight * recon_loss.item()}
    if variational_encoding:
        ret_dict['KLD'] = KLD_weight * KLD.item()
    return ret_dict
def main(config):
    """
    Train the pose feature extractor (autoencoder) on Human3.6M direction vectors.

    Loads the H3.6M dataset, trains EmbeddingNet for args.epochs epochs,
    evaluates on the validation split each epoch, saves best/periodic
    checkpoints, and periodically renders sample result videos.

    :param config: dict with key 'args' holding the parsed argument namespace.
    """
    args = config['args']

    # random seed
    if args.random_seed >= 0:
        utils.train_utils.set_random_seed(args.random_seed)

    # set logger
    utils.train_utils.set_logger(args.model_save_path, os.path.basename(__file__).replace('.py', '.log'))

    # dataset
    mean_dir_vec = np.squeeze(np.array(args.mean_dir_vec))
    path = 'data/h36m/data_3d_h36m.npz'  # from https://github.com/facebookresearch/VideoPose3D/blob/master/DATASETS.md
    train_dataset = Human36M(path, mean_dir_vec, is_train=True, augment=False)
    val_dataset = Human36M(path, mean_dir_vec, is_train=False, augment=False)
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(dataset=val_dataset, batch_size=args.batch_size, shuffle=False, drop_last=True)

    # train
    pose_dim = 27  # 9 x 3
    start = time.time()
    # NOTE: 'var_loss' is tracked but train_iter currently only reports 'loss'.
    loss_meters = [AverageMeter('loss'), AverageMeter('var_loss')]
    best_val_loss = (1e+10, 0)  # value, epoch

    # interval params
    # BUGFIX: clamp to >= 1; for tiny datasets len(train_loader) < 5 made
    # print_interval 0 and the modulo below raised ZeroDivisionError.
    print_interval = max(1, int(len(train_loader) / 5))
    save_sample_result_epoch_interval = 10
    save_model_epoch_interval = 20

    # init model and optimizer
    generator = EmbeddingNet(args, pose_dim, args.n_poses, None, None, None, mode='pose').to(device)
    gen_optimizer = optim.Adam(generator.parameters(), lr=args.learning_rate, betas=(0.5, 0.999))

    # training
    global_iter = 0
    best_values = {}  # best values for all loss metrics (currently never populated)
    for epoch in range(args.epochs):
        # evaluate the test set
        val_metrics = evaluate_testset(test_loader, generator)

        # best?
        val_loss = val_metrics['loss']
        is_best = val_loss < best_val_loss[0]
        if is_best:
            logging.info(' *** BEST VALIDATION LOSS: {:.3f}'.format(val_loss))
            best_val_loss = (val_loss, epoch)
        else:
            logging.info(' best validation loss so far: {:.3f} at EPOCH {}'.format(best_val_loss[0], best_val_loss[1]))

        # save model
        if is_best or (epoch % save_model_epoch_interval == 0 and epoch > 0):
            gen_state_dict = generator.state_dict()
            if is_best:
                save_name = '{}/{}_checkpoint_best.bin'.format(args.model_save_path, args.name)
            else:
                # BUGFIX: periodic (non-best) saves previously reused a stale
                # `save_name` from an earlier best epoch (overwriting the best
                # checkpoint) or raised NameError if no best had occurred yet.
                save_name = '{}/{}_checkpoint_{:03d}.bin'.format(args.model_save_path, args.name, epoch)

            utils.train_utils.save_checkpoint({
                'args': args, 'epoch': epoch, 'pose_dim': pose_dim, 'gen_dict': gen_state_dict,
            }, save_name)

        # save sample results
        if args.save_result_video and epoch % save_sample_result_epoch_interval == 0:
            evaluate_sample_and_save_video(epoch, args.name, test_loader, generator, args=args)

        # train iter
        iter_start_time = time.time()
        for iter_idx, (target_pose, target_vec) in enumerate(train_loader, 0):
            global_iter += 1
            batch_size = target_vec.size(0)
            target_vec = target_vec.to(device)
            loss = train_iter(args, epoch, target_vec, generator, gen_optimizer)

            # loss values
            for loss_meter in loss_meters:
                name = loss_meter.name
                if name in loss:
                    loss_meter.update(loss[name], batch_size)

            # print training status
            if (iter_idx + 1) % print_interval == 0:
                print_summary = 'EP {} ({:3d}) | {:>8s}, {:.0f} samples/s | '.format(
                    epoch, iter_idx + 1, utils.train_utils.time_since(start),
                    batch_size / (time.time() - iter_start_time))
                for loss_meter in loss_meters:
                    if loss_meter.count > 0:
                        print_summary += '{}: {:.3f}, '.format(loss_meter.name, loss_meter.avg)
                        loss_meter.reset()
                logging.info(print_summary)
                iter_start_time = time.time()

    # print best losses (no-op while best_values stays empty -- see note above)
    logging.info('--------- best loss values ---------')
    for key in best_values.keys():
        logging.info('{}: {:.3f} at EPOCH {}'.format(key, best_values[key][0], best_values[key][1]))
def evaluate_sample_and_save_video(epoch, prefix, test_data_loader, generator, args, n_save=None, save_path=None):
    """Render a few autoencoder reconstructions from the test set to video files.

    Picks one fixed sample per batch, reconstructs it with the generator
    (pose-only input path), and writes side-by-side videos via
    utils.train_utils.create_video_and_save.

    Args:
        epoch: current epoch number (used in output file names).
        prefix: file-name prefix for the saved videos.
        test_data_loader: loader yielding (_, target_dir_vec) batches.
        generator: embedding-net model; called with pose input only.
        args: run configuration (model_save_path, mean_dir_vec are read).
        n_save: number of samples to save; defaults to 1 on epoch <= 0, else 5.
        save_path: output directory; defaults to args.model_save_path.

    Returns:
        True (unconditionally).
    """
    generator.train(False)  # eval mode
    start = time.time()
    if not n_save:
        n_save = 1 if epoch <= 0 else 5
    with torch.no_grad():
        for iter_idx, data in enumerate(test_data_loader, 0):
            if iter_idx >= n_save:  # save N samples
                break
            _, target_dir_vec = data
            # prepare: pick one fixed sample from the batch
            # NOTE(review): assumes the test batch has more than 20 samples — confirm loader batch size
            select_index = 20
            target_dir_vec = target_dir_vec[select_index, :, :].unsqueeze(0).to(device)
            # generation (reconstruction from pose input only; text/audio are None)
            _, _, _, _, _, _, out_dir_vec = generator(None, None, None, target_dir_vec, variational_encoding=False)
            # to video
            target_dir_vec = np.squeeze(target_dir_vec.cpu().numpy())
            out_dir_vec = np.squeeze(out_dir_vec.cpu().numpy())
            if save_path is None:
                save_path = args.model_save_path
            mean_data = np.array(args.mean_dir_vec).reshape(-1, 3)
            utils.train_utils.create_video_and_save(
                save_path, epoch, prefix, iter_idx,
                target_dir_vec, out_dir_vec, mean_data, '')
    generator.train(True)  # back to training mode
    logging.info('saved sample videos, took {:.1f}s'.format(time.time() - start))
    return True
if __name__ == '__main__':
    # Parse CLI arguments and launch training; main() expects a config dict.
    _args = parse_args()
    main({'args': _args})
| 8,374 | 34.189076 | 120 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/synthesize.py
|
import datetime
import logging
import math
import os
import pickle
import random
import sys
import librosa
import soundfile as sf
import lmdb
import numpy as np
import time
import pyarrow
import torch
from torch.utils.data import DataLoader
import utils
from data_loader.lmdb_data_loader import SpeechMotionDataset, default_collate_fn, word_seq_collate_fn
from model.embedding_space_evaluator import EmbeddingSpaceEvaluator
from train import evaluate_testset
from utils.data_utils import extract_melspectrogram, remove_tags_marks, convert_dir_vec_to_pose
from utils.train_utils import create_video_and_save, set_logger
from utils.tts_helper import TTSHelper
sys.path.insert(0, '../../gentle')
import gentle
from data_loader.data_preprocessor import DataPreprocessor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gentle_resources = gentle.Resources()
def generate_gestures(args, pose_decoder, lang_model, audio, words, audio_sr=16000, vid=None,
                      seed_seq=None, fade_out=False):
    """Synthesize a gesture sequence (direction vectors) for a whole audio clip.

    The clip is split into fixed-length units of args.n_poses frames. Each unit
    is generated conditioned on the last args.n_pre_poses frames of the previous
    unit (as a constraint), and overlapping frames are linearly blended.

    Args:
        args: run configuration (model, n_poses, n_pre_poses, z_type, ...).
        pose_decoder: the generator network for args.model.
        lang_model: vocabulary with SOS/EOS tokens and word->index mapping.
        audio: 1-D waveform array.
        words: list of [word, start_sec, end_sec] entries.
        audio_sr: sample rate of *audio*.
        vid: optional speaker id (only used when args.z_type == 'speaker').
        seed_seq: optional seed poses used to constrain the first unit.
        fade_out: if True, fade the padded tail back to the mean pose.

    Returns:
        numpy array of shape (total_frames, pose_dims) of mean-subtracted
        direction vectors.
    """
    out_list = []
    n_frames = args.n_poses
    clip_length = len(audio) / audio_sr
    use_spectrogram = False
    if args.model == 'speech2gesture':
        use_spectrogram = True
    # pre seq: last channel is an indicator bit marking constrained frames
    pre_seq = torch.zeros((1, n_frames, len(args.mean_dir_vec) + 1))
    if seed_seq is not None:
        pre_seq[0, 0:args.n_pre_poses, :-1] = torch.Tensor(seed_seq[0:args.n_pre_poses])
        pre_seq[0, 0:args.n_pre_poses, -1] = 1  # indicating bit for seed poses
    # NOTE(review): spectrogram length math below assumes 16 kHz regardless of audio_sr — confirm
    sr = 16000
    spectrogram = None
    if use_spectrogram:
        # audio to spectrogram
        spectrogram = extract_melspectrogram(audio, sr)
    # divide into synthesize units and do synthesize
    unit_time = args.n_poses / args.motion_resampling_framerate
    stride_time = (args.n_poses - args.n_pre_poses) / args.motion_resampling_framerate
    if clip_length < unit_time:
        num_subdivision = 1
    else:
        num_subdivision = math.ceil((clip_length - unit_time) / stride_time) + 1
    spectrogram_sample_length = int(round(unit_time * sr / 512))  # 512 = spectrogram hop length, presumably
    audio_sample_length = int(unit_time * audio_sr)
    end_padding_duration = 0
    # prepare speaker input
    if args.z_type == 'speaker':
        if not vid:
            vid = random.randrange(pose_decoder.z_obj.n_words)
        print('vid:', vid)
        vid = torch.LongTensor([vid]).to(device)
    else:
        vid = None
    print('{}, {}, {}, {}, {}'.format(num_subdivision, unit_time, clip_length, stride_time, audio_sample_length))
    out_dir_vec = None
    start = time.time()
    for i in range(0, num_subdivision):
        start_time = i * stride_time
        end_time = start_time + unit_time
        # prepare spectrogram input
        in_spec = None
        if use_spectrogram:
            # prepare spec input
            # NOTE(review): offset is computed from shape[0] but the slice is on axis 1 — verify spectrogram layout
            audio_start = math.floor(start_time / clip_length * spectrogram.shape[0])
            audio_end = audio_start + spectrogram_sample_length
            in_spec = spectrogram[:, audio_start:audio_end]
            in_spec = torch.from_numpy(in_spec).unsqueeze(0).to(device)
        # prepare audio input (zero-pad the last, possibly short, unit)
        audio_start = math.floor(start_time / clip_length * len(audio))
        audio_end = audio_start + audio_sample_length
        in_audio = audio[audio_start:audio_end]
        if len(in_audio) < audio_sample_length:
            if i == num_subdivision - 1:
                end_padding_duration = audio_sample_length - len(in_audio)
            in_audio = np.pad(in_audio, (0, audio_sample_length - len(in_audio)), 'constant')
        in_audio = torch.from_numpy(in_audio).unsqueeze(0).to(device).float()
        # prepare text input: per-frame word indices plus SOS/EOS-delimited sequence
        word_seq = DataPreprocessor.get_words_in_time_range(word_list=words, start_time=start_time, end_time=end_time)
        extended_word_indices = np.zeros(n_frames)  # zero is the index of padding token
        word_indices = np.zeros(len(word_seq) + 2)
        word_indices[0] = lang_model.SOS_token
        word_indices[-1] = lang_model.EOS_token
        frame_duration = (end_time - start_time) / n_frames
        for w_i, word in enumerate(word_seq):
            print(word[0], end=', ')
            # place each word at the frame corresponding to its start time
            idx = max(0, int(np.floor((word[1] - start_time) / frame_duration)))
            extended_word_indices[idx] = lang_model.get_word_index(word[0])
            word_indices[w_i + 1] = lang_model.get_word_index(word[0])
        print(' ')
        in_text_padded = torch.LongTensor(extended_word_indices).unsqueeze(0).to(device)
        in_text = torch.LongTensor(word_indices).unsqueeze(0).to(device)
        # prepare pre seq: reuse the tail of the previous unit's output as constraint
        if i > 0:
            pre_seq[0, 0:args.n_pre_poses, :-1] = out_dir_vec.squeeze(0)[-args.n_pre_poses:]
            pre_seq[0, 0:args.n_pre_poses, -1] = 1  # indicating bit for constraints
        pre_seq = pre_seq.float().to(device)
        pre_seq_partial = pre_seq[0, 0:args.n_pre_poses, :-1].unsqueeze(0)
        # synthesize with the model-specific calling convention
        print(in_text_padded)
        if args.model == 'multimodal_context':
            out_dir_vec, *_ = pose_decoder(pre_seq, in_text_padded, in_audio, vid)
        elif args.model == 'joint_embedding':
            _, _, _, _, _, _, out_dir_vec = pose_decoder(in_text_padded, in_audio, pre_seq_partial, None, 'speech')
        elif args.model == 'seq2seq':
            words_lengths = torch.LongTensor([in_text.shape[1]]).to(device)
            out_dir_vec = pose_decoder(in_text, words_lengths, pre_seq_partial, None)
        elif args.model == 'speech2gesture':
            out_dir_vec = pose_decoder(in_spec, pre_seq_partial)
        else:
            assert False
        out_seq = out_dir_vec[0, :, :].data.cpu().numpy()
        # smoothing motion transition: cross-fade the n_pre_poses overlap frames
        if len(out_list) > 0:
            last_poses = out_list[-1][-args.n_pre_poses:]
            out_list[-1] = out_list[-1][:-args.n_pre_poses]  # delete last n_pre_poses frames
            for j in range(len(last_poses)):
                n = len(last_poses)
                prev = last_poses[j]
                # NOTE: 'next' shadows the builtin of the same name (local to this loop)
                next = out_seq[j]
                out_seq[j] = prev * (n - j) / (n + 1) + next * (j + 1) / (n + 1)
        out_list.append(out_seq)
    # '{:.2}' prints 2 significant digits of the per-unit average time
    print('generation took {:.2} s'.format((time.time() - start) / num_subdivision))
    # aggregate results
    out_dir_vec = np.vstack(out_list)
    # additional interpolation for seq2seq: polynomial smoothing around unit boundaries
    if args.model == 'seq2seq':
        n_smooth = args.n_pre_poses
        for i in range(num_subdivision):
            start_frame = args.n_pre_poses + i * (args.n_poses - args.n_pre_poses) - n_smooth
            if start_frame < 0:
                start_frame = 0
                end_frame = start_frame + n_smooth * 2
            else:
                end_frame = start_frame + n_smooth * 3
            # spline interp
            y = out_dir_vec[start_frame:end_frame]
            x = np.array(range(0, y.shape[0]))
            # NOTE(review): weights w are built but not passed to np.polyfit below
            # (the fade-out branch calls np.polyfit(..., w=w)) — likely an oversight
            w = np.ones(len(y))
            w[0] = 5
            w[-1] = 5
            coeffs = np.polyfit(x, y, 3)
            fit_functions = [np.poly1d(coeffs[:, k]) for k in range(0, y.shape[1])]
            interpolated_y = [fit_functions[k](x) for k in range(0, y.shape[1])]
            interpolated_y = np.transpose(np.asarray(interpolated_y))  # (num_frames x dims)
            out_dir_vec[start_frame:end_frame] = interpolated_y
    # fade out to the mean pose over the zero-padded tail of the audio
    if fade_out:
        n_smooth = args.n_pre_poses
        start_frame = len(out_dir_vec) - int(end_padding_duration / audio_sr * args.motion_resampling_framerate)
        end_frame = start_frame + n_smooth * 2
        if len(out_dir_vec) < end_frame:
            out_dir_vec = np.pad(out_dir_vec, [(0, end_frame - len(out_dir_vec)), (0, 0)], mode='constant')
        out_dir_vec[end_frame-n_smooth:] = np.zeros((len(args.mean_dir_vec)))  # fade out to mean poses
        # interpolation: weighted quadratic fit pinning both ends (w=5 at boundaries)
        y = out_dir_vec[start_frame:end_frame]
        x = np.array(range(0, y.shape[0]))
        w = np.ones(len(y))
        w[0] = 5
        w[-1] = 5
        coeffs = np.polyfit(x, y, 2, w=w)
        fit_functions = [np.poly1d(coeffs[:, k]) for k in range(0, y.shape[1])]
        interpolated_y = [fit_functions[k](x) for k in range(0, y.shape[1])]
        interpolated_y = np.transpose(np.asarray(interpolated_y))  # (num_frames x dims)
        out_dir_vec[start_frame:end_frame] = interpolated_y
    return out_dir_vec
def align_words(audio, text):
    """Force-align a transcript to audio with gentle.

    Args:
        audio: 1-D float waveform sampled at 16 kHz.
        text: plain transcript (tags/marks already removed).

    Returns:
        List of [word, start_sec, end_sec]. Words gentle could not align get
        timestamps interpolated from their neighbours; unalignable words at
        the clip boundaries are dropped.
    """
    # resample audio to 8K (gentle works on 8 kHz audio)
    audio_8k = librosa.resample(audio, 16000, 8000)
    wave_file = 'output/temp.wav'
    # fix: ensure the output directory exists before writing the temp wav,
    # otherwise sf.write fails on a fresh checkout
    os.makedirs(os.path.dirname(wave_file), exist_ok=True)
    sf.write(wave_file, audio_8k, 8000, 'PCM_16')
    # run gentle to align words
    aligner = gentle.ForcedAligner(gentle_resources, text, nthreads=2, disfluency=False,
                                   conservative=False)
    gentle_out = aligner.transcribe(wave_file, logging=logging)
    words_with_timestamps = []
    for i, gentle_word in enumerate(gentle_out.words):
        if gentle_word.case == 'success':
            words_with_timestamps.append([gentle_word.word, gentle_word.start, gentle_word.end])
        elif 0 < i < len(gentle_out.words) - 1:
            # NOTE(review): assumes the neighbouring words aligned successfully;
            # if they also failed, .end/.start may be absent — confirm gentle's API
            words_with_timestamps.append([gentle_word.word, gentle_out.words[i-1].end, gentle_out.words[i+1].start])
    return words_with_timestamps
def main(mode, checkpoint_path, option):
    """Entry point of the synthesis script.

    Args:
        mode: one of {'eval', 'from_text', 'from_db_clip'}.
            eval         - run quantitative evaluation on the validation set.
            from_text    - interactive TTS + gesture generation from a sentence.
            from_db_clip - generate gestures for random clips from the test DB.
        checkpoint_path: path to a trained-model checkpoint.
        option: mode-dependent extra argument — TTS voice name for 'from_text',
            number of generations for 'from_db_clip'; may be None.
    """
    args, generator, loss_fn, lang_model, speaker_model, out_dim = utils.train_utils.load_checkpoint_and_model(
        checkpoint_path, device)
    result_save_path = 'output/generation_results'
    # load mean vec
    mean_pose = np.array(args.mean_pose).squeeze()
    mean_dir_vec = np.array(args.mean_dir_vec).squeeze()
    # load lang_model (overrides the one from the checkpoint with the cached vocab)
    vocab_cache_path = os.path.join('data/ted_dataset', 'vocab_cache.pkl')
    with open(vocab_cache_path, 'rb') as f:
        lang_model = pickle.load(f)
    # seq2seq consumes variable-length word sequences and needs its own collate
    if args.model == 'seq2seq':
        collate_fn = word_seq_collate_fn
    else:
        collate_fn = default_collate_fn

    def load_dataset(path):
        # Build a SpeechMotionDataset at *path* with the checkpoint's settings.
        dataset = SpeechMotionDataset(path,
                                      n_poses=args.n_poses,
                                      subdivision_stride=args.subdivision_stride,
                                      pose_resampling_fps=args.motion_resampling_framerate,
                                      speaker_model=speaker_model,
                                      mean_pose=mean_pose,
                                      mean_dir_vec=mean_dir_vec
                                      )
        print(len(dataset))
        return dataset

    if mode == 'eval':
        val_data_path = 'data/ted_dataset/lmdb_val'
        eval_net_path = 'output/train_h36m_gesture_autoencoder/gesture_autoencoder_checkpoint_best.bin'
        embed_space_evaluator = EmbeddingSpaceEvaluator(args, eval_net_path, lang_model, device)
        val_dataset = load_dataset(val_data_path)
        data_loader = DataLoader(dataset=val_dataset, batch_size=32, collate_fn=collate_fn,
                                 shuffle=False, drop_last=True, num_workers=args.loader_workers)
        val_dataset.set_lang_model(lang_model)
        evaluate_testset(data_loader, generator, loss_fn, embed_space_evaluator, args)
    elif mode == 'from_text':
        random.seed()
        # canned example sentences; the last entry is a sentinel for free-text input
        examples = [
            '<break time="0.5s"/><prosody>once handed me a very thick book. <break time="0.1s"/>it was his familys legacy</prosody>',
            '<break time="0.5s"/>we can help millions of teens with counseling',
            'what an amazing day that will be. what a big opportunity we have.',
            'just the way a surgeon operates on a patient you can literally interact with your table',
            '[Enter a new text]'
        ]
        if option:
            voice = option
        else:
            voice = 'en-female'
        # pick a random speaker identity for the style vector
        vid = random.sample(range(0, speaker_model.n_words), 1)[0]
        tts = TTSHelper(cache_path='output/cached_wav')
        # text input: show the menu and read the user's choice
        for i, example in enumerate(examples):
            print('(%d) %s' % (i, example))
        try:
            select = int(input("select: "))
        except ValueError:
            exit(0)
        if select == len(examples) - 1:
            input_text = input("text: ")
        elif select >= len(examples) or select < 0:
            print('Please input a valid number. Exiting...')
            exit(0)
        else:
            input_text = examples[select]
        # generation: TTS -> forced alignment -> gesture synthesis
        text_without_tags = remove_tags_marks(input_text)
        print(text_without_tags)
        tts_filename = tts.synthesis(input_text, voice_name=voice, verbose=False)
        sound_obj, duration = tts.get_sound_obj(tts_filename)
        print('TTS complete (audio length: {0:.1f}s)'.format(duration))
        audio, audio_sr = librosa.load(tts_filename, mono=True, sr=16000, res_type='kaiser_fast')
        words_with_timestamps = align_words(audio, text_without_tags)
        dir_vec = generate_gestures(args, generator, lang_model, audio, words_with_timestamps, vid=vid,
                                    fade_out=False)
        # make a video
        save_path = 'output/generation_results'
        os.makedirs(save_path, exist_ok=True)
        prefix = '{}_vid_{}_{}'.format(text_without_tags[:50], vid, voice)
        out_pos, _ = create_video_and_save(
            save_path, 0, prefix, 0, None, dir_vec, mean_dir_vec, text_without_tags, audio=audio,
            clipping_to_shortest_stream=True, delete_audio_file=False)
        # save pkl (direction vectors are stored with the mean added back)
        save_dict = {
            'sentence': words_with_timestamps, 'audio': audio,
            'out_dir_vec': dir_vec + mean_dir_vec, 'out_poses': out_pos,
            'aux_info': ''
        }
        with open(os.path.join(result_save_path, '{}.pkl'.format(prefix)), 'wb') as f:
            pickle.dump(save_dict, f)
    elif mode == 'from_db_clip':
        test_data_path = 'data/ted_dataset/lmdb_test'
        save_path = 'output/generation_results'
        clip_duration_range = [5, 12]  # seconds; clips outside this range are skipped
        random.seed()
        if option:
            n_generations = int(option)
        else:
            n_generations = 5
        # load clips and make gestures
        n_saved = 0
        lmdb_env = lmdb.open(test_data_path, readonly=True, lock=False)
        with lmdb_env.begin(write=False) as txn:
            keys = [key for key, _ in txn.cursor()]
            while n_saved < n_generations:  # loop until we get the desired number of results
                # select video
                key = random.choice(keys)
                buf = txn.get(key)
                # NOTE(review): pyarrow.deserialize is a legacy API removed in recent pyarrow — pin the version
                video = pyarrow.deserialize(buf)
                vid = video['vid']
                clips = video['clips']
                # select clip
                n_clips = len(clips)
                if n_clips == 0:
                    continue
                clip_idx = random.randrange(n_clips)
                clip_poses = clips[clip_idx]['skeletons_3d']
                clip_audio = clips[clip_idx]['audio_raw']
                clip_words = clips[clip_idx]['words']
                clip_time = [clips[clip_idx]['start_time'], clips[clip_idx]['end_time']]
                clip_poses = utils.data_utils.resample_pose_seq(clip_poses, clip_time[1] - clip_time[0],
                                                                args.motion_resampling_framerate)
                target_dir_vec = utils.data_utils.convert_pose_seq_to_dir_vec(clip_poses)
                target_dir_vec = target_dir_vec.reshape(target_dir_vec.shape[0], -1)
                target_dir_vec -= mean_dir_vec
                # check duration
                clip_duration = clip_time[1] - clip_time[0]
                if clip_duration < clip_duration_range[0] or clip_duration > clip_duration_range[1]:
                    continue
                # synthesize
                for selected_vi in range(len(clip_words)):  # make start time of input text zero
                    clip_words[selected_vi][1] -= clip_time[0]  # start time
                    clip_words[selected_vi][2] -= clip_time[0]  # end time
                # use a random speaker identity, seeded with the clip's first poses
                vid_idx = random.sample(range(0, speaker_model.n_words), 1)[0]
                out_dir_vec = generate_gestures(args, generator, lang_model, clip_audio, clip_words, vid=vid_idx,
                                                seed_seq=target_dir_vec[0:args.n_pre_poses], fade_out=False)
                # make a video
                sentence_words = []
                for word, _, _ in clip_words:
                    sentence_words.append(word)
                sentence = ' '.join(sentence_words)
                os.makedirs(save_path, exist_ok=True)
                filename_prefix = '{}_{}_{}'.format(vid, vid_idx, clip_idx)
                filename_prefix_for_video = filename_prefix
                aux_str = '({}, time: {}-{})'.format(vid, str(datetime.timedelta(seconds=clip_time[0])),
                                                     str(datetime.timedelta(seconds=clip_time[1])))
                create_video_and_save(
                    save_path, 0, filename_prefix_for_video, 0, target_dir_vec, out_dir_vec,
                    mean_dir_vec, sentence, audio=clip_audio, aux_str=aux_str,
                    clipping_to_shortest_stream=True, delete_audio_file=False)
                # save pkl (direction vectors are stored with the mean added back)
                out_dir_vec = out_dir_vec + mean_dir_vec
                out_poses = convert_dir_vec_to_pose(out_dir_vec)
                save_dict = {
                    'sentence': sentence, 'audio': clip_audio.astype(np.float32),
                    'out_dir_vec': out_dir_vec, 'out_poses': out_poses,
                    'aux_info': '{}_{}_{}'.format(vid, vid_idx, clip_idx),
                    'human_dir_vec': target_dir_vec + mean_dir_vec,
                }
                with open(os.path.join(save_path, '{}.pkl'.format(filename_prefix)), 'wb') as f:
                    pickle.dump(save_dict, f)
                n_saved += 1
    else:
        assert False, 'wrong mode'
if __name__ == '__main__':
    # usage: python synthesize.py {eval|from_db_clip|from_text} <checkpoint path> [option]
    # fix: print a usage message instead of crashing with IndexError when
    # the required positional arguments are missing
    if len(sys.argv) < 3:
        print('usage: python synthesize.py {eval|from_db_clip|from_text} <checkpoint path> [option]')
        sys.exit(1)
    mode = sys.argv[1]  # {eval, from_db_clip, from_text}
    ckpt_path = sys.argv[2]
    # optional third argument: TTS voice name (from_text) or generation count (from_db_clip)
    option = sys.argv[3] if len(sys.argv) > 3 else None
    set_logger()
    main(mode, ckpt_path, option)
| 18,129 | 40.0181 | 133 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/train.py
|
import pprint
import time
from pathlib import Path
import sys
[sys.path.append(i) for i in ['.', '..']]
import matplotlib
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from model import speech2gesture, vocab
from model.embedding_net import EmbeddingNet
from model.seq2seq_net import Seq2SeqNet
from train_eval.train_gan import train_iter_gan
from train_eval.train_joint_embed import train_iter_embed, eval_embed
from train_eval.train_seq2seq import train_iter_seq2seq
from train_eval.train_speech2gesture import train_iter_speech2gesture
from utils.average_meter import AverageMeter
from utils.data_utils import convert_dir_vec_to_pose
from utils.vocab_utils import build_vocab
matplotlib.use('Agg') # we don't use interactive GUI
from config.parse_args import parse_args
from model.embedding_space_evaluator import EmbeddingSpaceEvaluator
from model.multimodal_context_net import PoseGenerator, ConvDiscriminator
from torch import optim
from data_loader.lmdb_data_loader import *
import utils.train_utils
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def init_model(args, lang_model, speaker_model, pose_dim, _device):
    """Build the generator (and, where applicable, discriminator) for args.model.

    Args:
        args: run configuration; args.model selects the architecture.
        lang_model: vocabulary providing n_words and pretrained embeddings.
        speaker_model: speaker vocab / sentinel used as the style-vector source.
        pose_dim: dimensionality of one pose frame.
        _device: torch device the modules are moved to.

    Returns:
        (generator, discriminator, loss_fn); discriminator and loss_fn are None
        for models that do not use them.

    Raises:
        ValueError: if args.model is not a recognized model name.
    """
    # init model
    n_frames = args.n_poses
    generator = discriminator = loss_fn = None
    if args.model == 'multimodal_context':
        generator = PoseGenerator(args,
                                  n_words=lang_model.n_words,
                                  word_embed_size=args.wordembed_dim,
                                  word_embeddings=lang_model.word_embedding_weights,
                                  z_obj=speaker_model,
                                  pose_dim=pose_dim).to(_device)
        discriminator = ConvDiscriminator(pose_dim).to(_device)
    elif args.model == 'joint_embedding':
        generator = EmbeddingNet(args, pose_dim, n_frames, lang_model.n_words, args.wordembed_dim,
                                 lang_model.word_embedding_weights, mode='random').to(_device)
    elif args.model == 'gesture_autoencoder':
        generator = EmbeddingNet(args, pose_dim, n_frames, lang_model.n_words, args.wordembed_dim,
                                 lang_model.word_embedding_weights, mode='pose').to(_device)
    elif args.model == 'seq2seq':
        generator = Seq2SeqNet(args, pose_dim, n_frames, lang_model.n_words, args.wordembed_dim,
                               lang_model.word_embedding_weights).to(_device)
        loss_fn = torch.nn.L1Loss()
    elif args.model == 'speech2gesture':
        generator = speech2gesture.Generator(n_frames, pose_dim, args.n_pre_poses).to(_device)
        discriminator = speech2gesture.Discriminator(pose_dim).to(_device)
        loss_fn = torch.nn.L1Loss()
    else:
        # fix: fail fast on an unknown model name instead of silently
        # returning generator=None and crashing later in the training loop
        raise ValueError('unknown model name: {}'.format(args.model))
    return generator, discriminator, loss_fn
def train_epochs(args, train_data_loader, test_data_loader, lang_model, pose_dim, speaker_model=None):
    """Run the full training loop for args.model over args.epochs epochs.

    Each epoch: validate, log to TensorBoard, checkpoint on best/interval,
    optionally render sample videos, then iterate the training set with the
    model-specific train_iter_* function.

    Args:
        args: run configuration.
        train_data_loader / test_data_loader: SpeechMotionDataset loaders.
        lang_model: vocabulary (stored into checkpoints, passed to models).
        pose_dim: dimensionality of one pose frame.
        speaker_model: speaker vocab; replaced by a sentinel per args.z_type.
    """
    start = time.time()
    loss_meters = [AverageMeter('loss'), AverageMeter('var_loss'), AverageMeter('gen'), AverageMeter('dis'),
                   AverageMeter('KLD'), AverageMeter('DIV_REG')]
    best_val_loss = (1e+10, 0)  # (value, epoch)
    tb_path = args.name + '_' + str(datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
    tb_writer = SummaryWriter(log_dir=str(Path(args.model_save_path).parent / 'tensorboard_runs' / tb_path))
    # interval params
    # NOTE(review): print_interval is 0 when len(train_data_loader) < 5, which
    # would make the modulo below raise ZeroDivisionError — confirm loader size
    print_interval = int(len(train_data_loader) / 5)
    save_sample_result_epoch_interval = 10
    save_model_epoch_interval = 20
    # z type: keep the vocab for 'speaker', use a truthy sentinel (1) for
    # 'random', and disable the style input otherwise
    if args.z_type == 'speaker':
        pass
    elif args.z_type == 'random':
        speaker_model = 1
    else:
        speaker_model = None
    # init model
    generator, discriminator, loss_fn = init_model(args, lang_model, speaker_model, pose_dim, device)
    # use multi GPUs
    if torch.cuda.device_count() > 1:
        generator = torch.nn.DataParallel(generator)
        if discriminator is not None:
            discriminator = torch.nn.DataParallel(discriminator)
    # prepare an evaluator for FGD
    embed_space_evaluator = None
    if args.eval_net_path and len(args.eval_net_path) > 0:
        embed_space_evaluator = EmbeddingSpaceEvaluator(args, args.eval_net_path, lang_model, device)
    # define optimizers
    gen_optimizer = optim.Adam(generator.parameters(), lr=args.learning_rate, betas=(0.5, 0.999))
    dis_optimizer = None
    if discriminator is not None:
        dis_optimizer = torch.optim.Adam(discriminator.parameters(),
                                         lr=args.learning_rate * args.discriminator_lr_weight,
                                         betas=(0.5, 0.999))
    # training
    global_iter = 0
    best_values = {}  # best values for all loss metrics
    for epoch in range(args.epochs):
        # evaluate the test set (before the epoch's training pass)
        val_metrics = evaluate_testset(test_data_loader, generator, loss_fn, embed_space_evaluator, args)
        # write to tensorboard and save best values
        for key in val_metrics.keys():
            tb_writer.add_scalar(key + '/validation', val_metrics[key], global_iter)
            if key not in best_values.keys() or val_metrics[key] < best_values[key][0]:
                best_values[key] = (val_metrics[key], epoch)
        # best? prefer FGD ('frechet') over plain loss when it is available
        if 'frechet' in val_metrics.keys():
            val_loss = val_metrics['frechet']
        else:
            val_loss = val_metrics['loss']
        is_best = val_loss < best_val_loss[0]
        if is_best:
            logging.info(' *** BEST VALIDATION LOSS: {:.3f}'.format(val_loss))
            best_val_loss = (val_loss, epoch)
        else:
            logging.info(' best validation loss so far: {:.3f} at EPOCH {}'.format(best_val_loss[0], best_val_loss[1]))
        # save model
        if is_best or (epoch % save_model_epoch_interval == 0 and epoch > 0):
            dis_state_dict = None
            try:  # multi gpu: DataParallel wraps the module
                gen_state_dict = generator.module.state_dict()
                if discriminator is not None:
                    dis_state_dict = discriminator.module.state_dict()
            except AttributeError:  # single gpu
                gen_state_dict = generator.state_dict()
                if discriminator is not None:
                    dis_state_dict = discriminator.state_dict()
            if is_best:
                save_name = '{}/{}_checkpoint_best.bin'.format(args.model_save_path, args.name)
            else:
                save_name = '{}/{}_checkpoint_{:03d}.bin'.format(args.model_save_path, args.name, epoch)
            utils.train_utils.save_checkpoint({
                'args': args, 'epoch': epoch, 'lang_model': lang_model, 'speaker_model': speaker_model,
                'pose_dim': pose_dim, 'gen_dict': gen_state_dict,
                'dis_dict': dis_state_dict,
            }, save_name)
        # save sample results
        if args.save_result_video and epoch % save_sample_result_epoch_interval == 0:
            evaluate_sample_and_save_video(
                epoch, args.name, test_data_loader, generator,
                args=args, lang_model=lang_model)
        # train iter
        iter_start_time = time.time()
        for iter_idx, data in enumerate(train_data_loader, 0):
            global_iter += 1
            in_text, text_lengths, in_text_padded, _, target_vec, in_audio, in_spec, aux_info = data
            batch_size = target_vec.size(0)
            in_text = in_text.to(device)
            in_text_padded = in_text_padded.to(device)
            in_audio = in_audio.to(device)
            in_spec = in_spec.to(device)
            target_vec = target_vec.to(device)
            # speaker input: map video ids to speaker-vocab indices
            vid_indices = []
            if speaker_model and isinstance(speaker_model, vocab.Vocab):
                vids = aux_info['vid']
                vid_indices = [speaker_model.word2index[vid] for vid in vids]
                vid_indices = torch.LongTensor(vid_indices).to(device)
            # train with the model-specific iteration function
            # NOTE(review): 'loss' starts as a list but the train_iter_* calls
            # appear to return a dict ('name in loss' / 'loss.keys()' below)
            loss = []
            if args.model == 'multimodal_context':
                loss = train_iter_gan(args, epoch, in_text_padded, in_audio, target_vec, vid_indices,
                                      generator, discriminator,
                                      gen_optimizer, dis_optimizer)
            elif args.model == 'joint_embedding':
                loss = train_iter_embed(args, epoch, in_text_padded, in_audio, target_vec,
                                        generator, gen_optimizer, mode='random')
            elif args.model == 'gesture_autoencoder':
                loss = train_iter_embed(args, epoch, in_text_padded, in_audio, target_vec,
                                        generator, gen_optimizer)
            elif args.model == 'seq2seq':
                loss = train_iter_seq2seq(args, epoch, in_text, text_lengths, target_vec, generator, gen_optimizer)
            elif args.model == 'speech2gesture':
                loss = train_iter_speech2gesture(args, in_spec, target_vec, generator, discriminator,
                                                 gen_optimizer, dis_optimizer, loss_fn)
            # loss values
            for loss_meter in loss_meters:
                name = loss_meter.name
                if name in loss:
                    loss_meter.update(loss[name], batch_size)
            # write to tensorboard
            for key in loss.keys():
                tb_writer.add_scalar(key + '/train', loss[key], global_iter)
            # print training status
            if (iter_idx + 1) % print_interval == 0:
                print_summary = 'EP {} ({:3d}) | {:>8s}, {:.0f} samples/s | '.format(
                    epoch, iter_idx + 1, utils.train_utils.time_since(start),
                    batch_size / (time.time() - iter_start_time))
                for loss_meter in loss_meters:
                    if loss_meter.count > 0:
                        print_summary += '{}: {:.3f}, '.format(loss_meter.name, loss_meter.avg)
                        loss_meter.reset()
                logging.info(print_summary)
                iter_start_time = time.time()
    tb_writer.close()
    # print best losses
    logging.info('--------- best loss values ---------')
    for key in best_values.keys():
        logging.info('{}: {:.3f} at EPOCH {}'.format(key, best_values[key][0], best_values[key][1]))
def evaluate_testset(test_data_loader, generator, loss_fn, embed_space_evaluator, args):
    """Evaluate the generator on the test/validation set.

    Computes the model-specific reconstruction loss, joint-position MAE,
    acceleration difference, and (if an embedding-space evaluator is given)
    FGD and feature distance.

    Args:
        test_data_loader: loader yielding full (text, audio, spec, pose) batches.
        generator: trained model for args.model.
        loss_fn: L1 loss used by the seq2seq / speech2gesture branches.
        embed_space_evaluator: optional EmbeddingSpaceEvaluator for FGD.
        args: run configuration.

    Returns:
        dict with keys 'loss', 'joint_mae' and, when FGD was computed,
        'frechet' and 'feat_dist'.
    """
    # to evaluation mode
    generator.train(False)
    if embed_space_evaluator:
        embed_space_evaluator.reset()
    losses = AverageMeter('loss')
    joint_mae = AverageMeter('mae_on_joint')
    accel = AverageMeter('accel')
    start = time.time()
    with torch.no_grad():
        for iter_idx, data in enumerate(test_data_loader, 0):
            in_text, text_lengths, in_text_padded, _, target_vec, in_audio, in_spec, aux_info = data
            batch_size = target_vec.size(0)
            in_text = in_text.to(device)
            in_text_padded = in_text_padded.to(device)
            in_audio = in_audio.to(device)
            in_spec = in_spec.to(device)
            target = target_vec.to(device)
            # speaker input: sample a random speaker index per batch element
            speaker_model = utils.train_utils.get_speaker_model(generator)
            if speaker_model:
                vid_indices = [random.choice(list(speaker_model.word2index.values())) for _ in range(batch_size)]
                vid_indices = torch.LongTensor(vid_indices).to(device)
            else:
                vid_indices = None
            # seed the first n_pre_poses frames; last channel is the constraint bit
            pre_seq = target.new_zeros((target.shape[0], target.shape[1], target.shape[2] + 1))
            pre_seq[:, 0:args.n_pre_poses, :-1] = target[:, 0:args.n_pre_poses]
            pre_seq[:, 0:args.n_pre_poses, -1] = 1  # indicating bit for constraints
            pre_seq_partial = pre_seq[:, 0:args.n_pre_poses, :-1]
            # model-specific forward pass and loss
            if args.model == 'joint_embedding':
                loss, out_dir_vec = eval_embed(in_text_padded, in_audio, pre_seq_partial,
                                               target, generator, mode='speech')
            elif args.model == 'gesture_autoencoder':
                loss, _ = eval_embed(in_text_padded, in_audio, pre_seq_partial, target, generator)
            elif args.model == 'seq2seq':
                out_dir_vec = generator(in_text, text_lengths, target, None)
                loss = loss_fn(out_dir_vec, target)
            elif args.model == 'speech2gesture':
                out_dir_vec = generator(in_spec, pre_seq_partial)
                loss = loss_fn(out_dir_vec, target)
            elif args.model == 'multimodal_context':
                out_dir_vec, *_ = generator(pre_seq, in_text_padded, in_audio, vid_indices)
                loss = F.l1_loss(out_dir_vec, target)
            else:
                assert False
            losses.update(loss.item(), batch_size)
            # gesture_autoencoder has no generated sequence to score below
            if args.model != 'gesture_autoencoder':
                if embed_space_evaluator:
                    embed_space_evaluator.push_samples(in_text_padded, in_audio, out_dir_vec, target)
                # calculate MAE of joint coordinates (mean vector added back first)
                out_dir_vec = out_dir_vec.cpu().numpy()
                out_dir_vec += np.array(args.mean_dir_vec).squeeze()
                out_joint_poses = convert_dir_vec_to_pose(out_dir_vec)
                target_vec = target_vec.cpu().numpy()
                target_vec += np.array(args.mean_dir_vec).squeeze()
                target_poses = convert_dir_vec_to_pose(target_vec)
                # some models emit only the frames after the seed; align accordingly
                if out_joint_poses.shape[1] == args.n_poses:
                    diff = out_joint_poses[:, args.n_pre_poses:] - target_poses[:, args.n_pre_poses:]
                else:
                    diff = out_joint_poses - target_poses[:, args.n_pre_poses:]
                mae_val = np.mean(np.absolute(diff))
                joint_mae.update(mae_val, batch_size)
                # accel: mean absolute difference of second temporal derivatives
                target_acc = np.diff(target_poses, n=2, axis=1)
                out_acc = np.diff(out_joint_poses, n=2, axis=1)
                accel.update(np.mean(np.abs(target_acc - out_acc)), batch_size)
    # back to training mode
    generator.train(True)
    # print
    ret_dict = {'loss': losses.avg, 'joint_mae': joint_mae.avg}
    elapsed_time = time.time() - start
    if embed_space_evaluator and embed_space_evaluator.get_no_of_samples() > 0:
        frechet_dist, feat_dist = embed_space_evaluator.get_scores()
        logging.info(
            '[VAL] loss: {:.3f}, joint mae: {:.5f}, accel diff: {:.5f}, FGD: {:.3f}, feat_D: {:.3f} / {:.1f}s'.format(
                losses.avg, joint_mae.avg, accel.avg, frechet_dist, feat_dist, elapsed_time))
        ret_dict['frechet'] = frechet_dist
        ret_dict['feat_dist'] = feat_dist
    else:
        logging.info('[VAL] loss: {:.3f}, joint mae: {:.3f} / {:.1f}s'.format(
            losses.avg, joint_mae.avg, elapsed_time))
    return ret_dict
def evaluate_sample_and_save_video(epoch, prefix, test_data_loader, generator, args, lang_model,
                                   n_save=None, save_path=None):
    """Generate gestures for a few test samples and save them as videos.

    For each of the first *n_save* batches, the first batch element is run
    through the model for args.model, rendered alongside the ground truth, and
    the raw arrays are collected for the caller.

    Args:
        epoch: current epoch (controls default n_save and output names).
        prefix: file-name prefix for the saved videos.
        test_data_loader: loader yielding full (text, audio, spec, pose) batches.
        generator: trained model for args.model.
        args: run configuration.
        lang_model: vocabulary used to decode the input sentence for display.
        n_save: number of samples to save; defaults to 1 on epoch <= 0, else 5.
        save_path: output directory; defaults to args.model_save_path.

    Returns:
        list of dicts with sentence, audio, human/generated direction vectors
        (mean added back, reshaped to (frames, 9, 3)) and an aux-info string.
    """
    generator.train(False)  # eval mode
    start = time.time()
    if not n_save:
        n_save = 1 if epoch <= 0 else 5
    out_raw = []
    with torch.no_grad():
        for iter_idx, data in enumerate(test_data_loader, 0):
            if iter_idx >= n_save:  # save N samples
                break
            in_text, text_lengths, in_text_padded, _, target_dir_vec, in_audio, in_spec, aux_info = data
            # prepare: use the first element of the batch
            select_index = 0
            if args.model == 'seq2seq':
                in_text = in_text[select_index, :].unsqueeze(0).to(device)
                text_lengths = text_lengths[select_index].unsqueeze(0).to(device)
            in_text_padded = in_text_padded[select_index, :].unsqueeze(0).to(device)
            in_audio = in_audio[select_index, :].unsqueeze(0).to(device)
            in_spec = in_spec[select_index, :, :].unsqueeze(0).to(device)
            target_dir_vec = target_dir_vec[select_index, :, :].unsqueeze(0).to(device)
            # decode the padded word indices back into a display sentence
            input_words = []
            for i in range(in_text_padded.shape[1]):
                word_idx = int(in_text_padded.data[select_index, i])
                if word_idx > 0:
                    input_words.append(lang_model.index2word[word_idx])
            sentence = ' '.join(input_words)
            # speaker input: random speaker identity
            speaker_model = utils.train_utils.get_speaker_model(generator)
            if speaker_model:
                vid = aux_info['vid'][select_index]
                # vid_indices = [speaker_model.word2index[vid]]
                vid_indices = [random.choice(list(speaker_model.word2index.values()))]
                vid_indices = torch.LongTensor(vid_indices).to(device)
            else:
                vid_indices = None
            # aux info: source video id and time span for the overlay text
            aux_str = '({}, time: {}-{})'.format(
                aux_info['vid'][select_index],
                str(datetime.timedelta(seconds=aux_info['start_time'][select_index].item())),
                str(datetime.timedelta(seconds=aux_info['end_time'][select_index].item())))
            # synthesize: seed the first n_pre_poses frames with ground truth
            pre_seq = target_dir_vec.new_zeros((target_dir_vec.shape[0], target_dir_vec.shape[1],
                                                target_dir_vec.shape[2] + 1))
            pre_seq[:, 0:args.n_pre_poses, :-1] = target_dir_vec[:, 0:args.n_pre_poses]
            pre_seq[:, 0:args.n_pre_poses, -1] = 1  # indicating bit for constraints
            pre_seq_partial = pre_seq[:, 0:args.n_pre_poses, :-1]
            if args.model == 'multimodal_context':
                out_dir_vec, *_ = generator(pre_seq, in_text_padded, in_audio, vid_indices)
            elif args.model == 'joint_embedding':
                _, _, _, _, _, _, out_dir_vec = generator(in_text_padded, in_audio, pre_seq_partial, None, 'speech')
            elif args.model == 'gesture_autoencoder':
                _, _, _, _, _, _, out_dir_vec = generator(in_text_padded, in_audio, pre_seq_partial, target_dir_vec,
                                                          variational_encoding=False)
            elif args.model == 'seq2seq':
                out_dir_vec = generator(in_text, text_lengths, target_dir_vec, None)
                # out_poses = torch.cat((pre_poses, out_poses), dim=1)
            elif args.model == 'speech2gesture':
                out_dir_vec = generator(in_spec, pre_seq_partial)
            # to video
            audio_npy = np.squeeze(in_audio.cpu().numpy())
            target_dir_vec = np.squeeze(target_dir_vec.cpu().numpy())
            out_dir_vec = np.squeeze(out_dir_vec.cpu().numpy())
            if save_path is None:
                save_path = args.model_save_path
            mean_data = np.array(args.mean_dir_vec).reshape(-1, 3)
            utils.train_utils.create_video_and_save(
                save_path, epoch, prefix, iter_idx,
                target_dir_vec, out_dir_vec, mean_data,
                sentence, audio=audio_npy, aux_str=aux_str)
            # collect raw arrays; 9 joints x 3D direction vectors per frame
            target_dir_vec = target_dir_vec.reshape((target_dir_vec.shape[0], 9, 3))
            out_dir_vec = out_dir_vec.reshape((out_dir_vec.shape[0], 9, 3))
            out_raw.append({
                'sentence': sentence,
                'audio': audio_npy,
                'human_dir_vec': target_dir_vec + mean_data,
                'out_dir_vec': out_dir_vec + mean_data,
                'aux_info': aux_str
            })
    generator.train(True)  # back to training mode
    logging.info('saved sample videos, took {:.1f}s'.format(time.time() - start))
    return out_raw
def main(config):
    """Entry point: set seed and logging, build datasets/vocab, run training."""
    args = config['args']

    # reproducibility (only when a non-negative seed was requested)
    if args.random_seed >= 0:
        utils.train_utils.set_random_seed(args.random_seed)

    # logging setup plus an environment report
    log_name = os.path.basename(__file__).replace('.py', '.log')
    utils.train_utils.set_logger(args.model_save_path, log_name)
    logging.info("PyTorch version: {}".format(torch.__version__))
    logging.info("CUDA version: {}".format(torch.version.cuda))
    logging.info("{} GPUs, default {}".format(torch.cuda.device_count(), device))
    logging.info(pprint.pformat(vars(args)))

    # seq2seq consumes variable-length word sequences; others use the default collate
    if args.model == 'seq2seq':
        collate_fn = word_seq_collate_fn
    else:
        collate_fn = default_collate_fn

    # dataset: all three splits share these construction arguments
    dir_vec_mean = np.array(args.mean_dir_vec).reshape(-1, 3)
    shared_kwargs = dict(n_poses=args.n_poses,
                         subdivision_stride=args.subdivision_stride,
                         pose_resampling_fps=args.motion_resampling_framerate,
                         mean_dir_vec=dir_vec_mean,
                         mean_pose=args.mean_pose)

    train_dataset = SpeechMotionDataset(args.train_data_path[0],
                                        remove_word_timing=(args.input_context == 'text'),
                                        **shared_kwargs)
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size,
                              shuffle=True, drop_last=True, num_workers=args.loader_workers,
                              pin_memory=True, collate_fn=collate_fn)

    val_dataset = SpeechMotionDataset(args.val_data_path[0],
                                      speaker_model=train_dataset.speaker_model,
                                      remove_word_timing=(args.input_context == 'text'),
                                      **shared_kwargs)
    test_loader = DataLoader(dataset=val_dataset, batch_size=args.batch_size,
                             shuffle=False, drop_last=True, num_workers=args.loader_workers,
                             pin_memory=True, collate_fn=collate_fn)

    # the test split is only instantiated here so its words enter the vocabulary
    test_dataset = SpeechMotionDataset(args.test_data_path[0],
                                       speaker_model=train_dataset.speaker_model,
                                       **shared_kwargs)

    # build vocab (or load the cached one) with pretrained word embeddings
    vocab_cache_path = os.path.join(os.path.split(args.train_data_path[0])[0], 'vocab_cache.pkl')
    lang_model = build_vocab('words', [train_dataset, val_dataset, test_dataset], vocab_cache_path,
                             args.wordembed_path, args.wordembed_dim)
    train_dataset.set_lang_model(lang_model)
    val_dataset.set_lang_model(lang_model)

    # train
    pose_dim = 27  # 9 joints x 3 coordinates
    train_epochs(args, train_loader, test_loader, lang_model,
                 pose_dim=pose_dim, speaker_model=train_dataset.speaker_model)
if __name__ == '__main__':
    # parse command-line/config arguments and launch training
    _args = parse_args()
    main({'args': _args})
| 23,232 | 45.005941 | 120 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/data_loader/motion_preprocessor.py
|
import numpy as np
class MotionPreprocessor:
    """Filter low-quality skeleton clips: near-mean poses, tilted spines, static arms."""

    def __init__(self, skeletons, mean_pose):
        # skeletons: sequence of frames, each (n_joints, 3); mean_pose flattens to (n_joints, 3)
        self.skeletons = np.array(skeletons)
        self.mean_pose = np.array(mean_pose).reshape(-1, 3)
        self.filtering_message = "PASS"

    def get(self):
        """Run the filters; return (skeletons_as_list_or_[], filtering_message)."""
        assert (self.skeletons is not None)

        # filtering.  NOTE: use an explicit emptiness test instead of the
        # original `self.skeletons != []` -- comparing a NumPy array against a
        # Python list performs elementwise comparison, which is deprecated and
        # unreliable inside an `if` statement.
        if len(self.skeletons) > 0:
            if self.check_pose_diff():
                self.skeletons = []
                self.filtering_message = "pose"
            elif self.check_spine_angle():
                self.skeletons = []
                self.filtering_message = "spine angle"
            elif self.check_static_motion():
                self.skeletons = []
                self.filtering_message = "motion"

        if len(self.skeletons) > 0:
            self.skeletons = self.skeletons.tolist()
            for i, frame in enumerate(self.skeletons):
                assert not np.isnan(self.skeletons[i]).any()  # missing joints

        return self.skeletons, self.filtering_message

    def check_static_motion(self, verbose=False):
        """Return True when both wrist joints barely move across the clip."""
        def get_variance(skeleton, joint_idx):
            # sum of per-coordinate variances of one joint over time
            wrist_pos = skeleton[:, joint_idx]
            variance = np.sum(np.var(wrist_pos, axis=0))
            return variance

        left_arm_var = get_variance(self.skeletons, 6)   # joint 6: left wrist (assumed -- TODO confirm)
        right_arm_var = get_variance(self.skeletons, 9)  # joint 9: right wrist (assumed -- TODO confirm)

        th = 0.0014  # exclude 13110
        # th = 0.002  # exclude 16905
        if left_arm_var < th and right_arm_var < th:
            if verbose:
                print('skip - check_static_motion left var {}, right var {}'.format(left_arm_var, right_arm_var))
            return True
        else:
            if verbose:
                print('pass - check_static_motion left var {}, right var {}'.format(left_arm_var, right_arm_var))
            return False

    def check_pose_diff(self, verbose=False):
        """Return True when the clip stays too close to the dataset mean pose."""
        diff = np.abs(self.skeletons - self.mean_pose)
        diff = np.mean(diff)

        # th = 0.017
        th = 0.02  # exclude 3594
        if diff < th:
            if verbose:
                print('skip - check_pose_diff {:.5f}'.format(diff))
            return True
        else:
            if verbose:
                print('pass - check_pose_diff {:.5f}'.format(diff))
            return False

    def check_spine_angle(self, verbose=False):
        """Return True when the spine tilts too far from vertical (max > 30 or mean > 20 deg)."""
        def angle_between(v1, v2):
            v1_u = v1 / np.linalg.norm(v1)
            v2_u = v2 / np.linalg.norm(v2)
            return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))

        angles = []
        for i in range(self.skeletons.shape[0]):
            # joints 0/1 span the spine (assumed from usage -- TODO confirm)
            spine_vec = self.skeletons[i, 1] - self.skeletons[i, 0]
            angle = angle_between(spine_vec, [0, -1, 0])
            angles.append(angle)

        if np.rad2deg(max(angles)) > 30 or np.rad2deg(np.mean(angles)) > 20:  # exclude 4495
            # if np.rad2deg(max(angles)) > 20:  # exclude 8270
            if verbose:
                print('skip - check_spine_angle {:.5f}, {:.5f}'.format(max(angles), np.mean(angles)))
            return True
        else:
            if verbose:
                print('pass - check_spine_angle {:.5f}'.format(max(angles)))
            return False
| 3,196 | 35.329545 | 113 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/data_loader/data_preprocessor.py
|
""" create data samples """
import logging
from collections import defaultdict
import lmdb
import math
import numpy as np
import pyarrow
import tqdm
from sklearn.preprocessing import normalize
import utils.data_utils
from data_loader.motion_preprocessor import MotionPreprocessor
class DataPreprocessor:
    """Cut long LMDB clips into fixed-length training samples.

    Reads videos/clips from a source LMDB, subdivides each clip into
    overlapping windows of `n_poses` frames (stride `subdivision_stride`),
    filters bad motion with MotionPreprocessor, aligns the corresponding
    spectrogram / raw-audio segments, and writes the samples to a new LMDB.
    """
    def __init__(self, clip_lmdb_dir, out_lmdb_dir, n_poses, subdivision_stride,
                 pose_resampling_fps, mean_pose, mean_dir_vec, disable_filtering=False):
        self.n_poses = n_poses
        self.subdivision_stride = subdivision_stride
        self.skeleton_resampling_fps = pose_resampling_fps
        self.mean_pose = mean_pose
        self.mean_dir_vec = mean_dir_vec
        self.disable_filtering = disable_filtering
        self.src_lmdb_env = lmdb.open(clip_lmdb_dir, readonly=True, lock=False)
        with self.src_lmdb_env.begin() as txn:
            self.n_videos = txn.stat()['entries']
        # expected audio lengths per sample; raw audio is assumed 16 kHz
        self.spectrogram_sample_length = utils.data_utils.calc_spectrogram_length_from_motion_length(self.n_poses, self.skeleton_resampling_fps)
        self.audio_sample_length = int(self.n_poses / self.skeleton_resampling_fps * 16000)
        # create db for samples
        map_size = 1024 * 50  # in MB
        map_size <<= 20  # in B
        self.dst_lmdb_env = lmdb.open(out_lmdb_dir, map_size=map_size)
        self.n_out_samples = 0

    def run(self):
        """Process every clip of every video and print filtering statistics."""
        n_filtered_out = defaultdict(int)
        src_txn = self.src_lmdb_env.begin(write=False)
        # sampling and normalization
        cursor = src_txn.cursor()
        for key, value in cursor:
            video = pyarrow.deserialize(value)
            vid = video['vid']
            clips = video['clips']
            for clip_idx, clip in enumerate(clips):
                filtered_result = self._sample_from_clip(vid, clip)
                # accumulate per-reason rejection counts
                for type in filtered_result.keys():
                    n_filtered_out[type] += filtered_result[type]
        # print stats
        with self.dst_lmdb_env.begin() as txn:
            print('no. of samples: ', txn.stat()['entries'])
            n_total_filtered = 0
            for type, n_filtered in n_filtered_out.items():
                print('{}: {}'.format(type, n_filtered))
                n_total_filtered += n_filtered
            print('no. of excluded samples: {} ({:.1f}%)'.format(
                n_total_filtered, 100 * n_total_filtered / (txn.stat()['entries'] + n_total_filtered)))
        # close db
        self.src_lmdb_env.close()
        self.dst_lmdb_env.sync()
        self.dst_lmdb_env.close()

    def _sample_from_clip(self, vid, clip):
        """Subdivide one clip into samples and write them; return rejection counts by reason."""
        clip_skeleton = clip['skeletons_3d']
        clip_audio = clip['audio_feat']      # spectrogram, shape (freq, time) -- inferred from axis-1 slicing
        clip_audio_raw = clip['audio_raw']   # 1-D raw waveform
        clip_word_list = clip['words']       # entries of (word, start_time, end_time)
        clip_s_f, clip_e_f = clip['start_frame_no'], clip['end_frame_no']
        clip_s_t, clip_e_t = clip['start_time'], clip['end_time']
        n_filtered_out = defaultdict(int)
        # skeleton resampling
        clip_skeleton = utils.data_utils.resample_pose_seq(clip_skeleton, clip_e_t - clip_s_t, self.skeleton_resampling_fps)
        # divide
        aux_info = []
        sample_skeletons_list = []
        sample_words_list = []
        sample_audio_list = []
        sample_spectrogram_list = []
        num_subdivision = math.floor(
            (len(clip_skeleton) - self.n_poses)
            / self.subdivision_stride) + 1  # floor((K - (N+M)) / S) + 1
        expected_audio_length = utils.data_utils.calc_spectrogram_length_from_motion_length(len(clip_skeleton), self.skeleton_resampling_fps)
        assert abs(expected_audio_length - clip_audio.shape[1]) <= 5, 'audio and skeleton lengths are different'
        for i in range(num_subdivision):
            start_idx = i * self.subdivision_stride
            fin_idx = start_idx + self.n_poses
            sample_skeletons = clip_skeleton[start_idx:fin_idx]
            subdivision_start_time = clip_s_t + start_idx / self.skeleton_resampling_fps
            subdivision_end_time = clip_s_t + fin_idx / self.skeleton_resampling_fps
            sample_words = self.get_words_in_time_range(word_list=clip_word_list,
                                                        start_time=subdivision_start_time,
                                                        end_time=subdivision_end_time)
            # spectrogram: cut the window proportional to the pose window
            audio_start = math.floor(start_idx / len(clip_skeleton) * clip_audio.shape[1])
            audio_end = audio_start + self.spectrogram_sample_length
            if audio_end > clip_audio.shape[1]:  # correct size mismatch between poses and audio
                # logging.info('expanding audio array, audio start={}, end={}, clip_length={}'.format(
                #     audio_start, audio_end, clip_audio.shape[1]))
                n_padding = audio_end - clip_audio.shape[1]
                padded_data = np.pad(clip_audio, ((0, 0), (0, n_padding)), mode='symmetric')
                sample_spectrogram = padded_data[:, audio_start:audio_end]
            else:
                sample_spectrogram = clip_audio[:, audio_start:audio_end]
            # raw audio: same proportional cut on the waveform
            audio_start = math.floor(start_idx / len(clip_skeleton) * len(clip_audio_raw))
            audio_end = audio_start + self.audio_sample_length
            if audio_end > len(clip_audio_raw):  # correct size mismatch between poses and audio
                # logging.info('expanding audio array, audio start={}, end={}, clip_length={}'.format(
                #     audio_start, audio_end, len(clip_audio_raw)))
                n_padding = audio_end - len(clip_audio_raw)
                padded_data = np.pad(clip_audio_raw, (0, n_padding), mode='symmetric')
                sample_audio = padded_data[audio_start:audio_end]
            else:
                sample_audio = clip_audio_raw[audio_start:audio_end]
            if len(sample_words) >= 2:
                # filtering motion skeleton data (windows with <2 words are dropped silently)
                sample_skeletons, filtering_message = MotionPreprocessor(sample_skeletons, self.mean_pose).get()
                is_correct_motion = (sample_skeletons != [])
                motion_info = {'vid': vid,
                               'start_frame_no': clip_s_f + start_idx,
                               'end_frame_no': clip_s_f + fin_idx,
                               'start_time': subdivision_start_time,
                               'end_time': subdivision_end_time,
                               'is_correct_motion': is_correct_motion, 'filtering_message': filtering_message}
                if is_correct_motion or self.disable_filtering:
                    sample_skeletons_list.append(sample_skeletons)
                    sample_words_list.append(sample_words)
                    sample_audio_list.append(sample_audio)
                    sample_spectrogram_list.append(sample_spectrogram)
                    aux_info.append(motion_info)
                else:
                    n_filtered_out[filtering_message] += 1
        if len(sample_skeletons_list) > 0:
            with self.dst_lmdb_env.begin(write=True) as txn:
                for words, poses, audio, spectrogram, aux in zip(sample_words_list, sample_skeletons_list,
                                                                 sample_audio_list, sample_spectrogram_list,
                                                                 aux_info):
                    # preprocessing for poses: store mean-subtracted directional vectors
                    poses = np.asarray(poses)
                    dir_vec = utils.data_utils.convert_pose_seq_to_dir_vec(poses)
                    normalized_dir_vec = self.normalize_dir_vec(dir_vec, self.mean_dir_vec)
                    # save with a zero-padded sequential key
                    k = '{:010}'.format(self.n_out_samples).encode('ascii')
                    v = [words, poses, normalized_dir_vec, audio, spectrogram, aux]
                    v = pyarrow.serialize(v).to_buffer()
                    txn.put(k, v)
                    self.n_out_samples += 1
        return n_filtered_out

    @staticmethod
    def normalize_dir_vec(dir_vec, mean_dir_vec):
        # simple mean subtraction; inverse is addition
        return dir_vec - mean_dir_vec

    @staticmethod
    def get_words_in_time_range(word_list, start_time, end_time):
        """Return words overlapping [start_time, end_time); word_list must be time-sorted."""
        words = []
        for word in word_list:
            _, word_s, word_e = word[0], word[1], word[2]
            if word_s >= end_time:
                break
            if word_e <= start_time:
                continue
            words.append(word)
        return words

    @staticmethod
    def unnormalize_data(normalized_data, data_mean, data_std, dimensions_to_ignore):
        """
        this method is from https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/generateMotionData.py#L12
        """
        T = normalized_data.shape[0]
        D = data_mean.shape[0]
        origData = np.zeros((T, D), dtype=np.float32)
        dimensions_to_use = []
        for i in range(D):
            if i in dimensions_to_ignore:
                continue
            dimensions_to_use.append(i)
        dimensions_to_use = np.array(dimensions_to_use)
        origData[:, dimensions_to_use] = normalized_data
        # potentially inefficient, but only done once per experiment
        stdMat = data_std.reshape((1, D))
        stdMat = np.repeat(stdMat, T, axis=0)
        meanMat = data_mean.reshape((1, D))
        meanMat = np.repeat(meanMat, T, axis=0)
        origData = np.multiply(origData, stdMat) + meanMat
        return origData
| 9,499 | 42.981481 | 144 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/data_loader/calculate_motion_stats.py
|
import os
import lmdb
import numpy as np
import pyarrow
import utils.train_utils
import utils.data_utils
def calculate_data_mean(base_path):
    """Print dataset statistics of <base_path>/lmdb_train: mean pose, mean
    directional vectors, mean bone lengths, and total clip duration."""
    db_path = os.path.join(base_path, 'lmdb_train')
    env = lmdb.open(db_path, readonly=True, lock=False)
    with env.begin() as txn:
        n_videos = txn.stat()['entries']

    # collect every clip's skeleton sequence and accumulate clip durations
    read_txn = env.begin(write=False)
    pose_seqs = []
    total_duration = 0
    for key, value in read_txn.cursor():
        video = pyarrow.deserialize(value)
        vid = video['vid']
        for clip_idx, clip in enumerate(video['clips']):
            pose_seqs.append(clip['skeletons_3d'])
            total_duration += (clip['end_time'] - clip['start_time'])

    # close db
    env.close()

    stacked = np.vstack(pose_seqs)  # (total_frames, n_joints, 3)
    mean_pose = np.mean(stacked, axis=0)

    # mean directional vector
    dir_vec = utils.data_utils.convert_pose_seq_to_dir_vec(stacked)
    mean_dir_vec = np.mean(dir_vec, axis=0)

    # mean bone length over all frames, one entry per joint pair
    bone_lengths = [np.mean(np.linalg.norm(stacked[:, pair[1]] - stacked[:, pair[0]], axis=1))
                    for pair in utils.data_utils.dir_vec_pairs]

    print('mean pose', repr(mean_pose.flatten()))
    print('mean directional vector', repr(mean_dir_vec.flatten()))
    print('mean bone lengths', repr(bone_lengths))
    print('total duration of the valid clips: {:.1f} h'.format(total_duration/3600))
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('TkAgg')  # select the backend before any pyplot import
    np.set_printoptions(precision=7, suppress=True)  # readable numeric output
    lmdb_base_path = '../../data/ted_dataset'
    calculate_data_mean(lmdb_base_path)
| 1,748 | 28.644068 | 84 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/data_loader/h36m_loader.py
|
import math
import random
import torch
import numpy as np
from torch.utils.data import Dataset
from utils.data_utils import convert_pose_seq_to_dir_vec, convert_dir_vec_to_pose
# NOTE(review): 'S11' appears in both the train and the test list, so the test
# subject is also seen during training -- confirm this overlap is intentional.
train_subject = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
test_subject = ['S11']
class Human36M(Dataset):
    """Human3.6M dataset yielding fixed-length windows of 34 poses.

    Poses are reduced to 12 target joints, normalized (hip-centered, y-up,
    frontalized), optionally noise-augmented, and returned together with
    mean-subtracted directional vectors.
    """
    def __init__(self, path, mean_data, is_train=True, augment=False):
        n_poses = 34
        target_joints = [1, 6, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27]  # see https://github.com/kenkra/3d-pose-baseline-vmd/wiki/body
        self.is_train = is_train
        self.augment = augment
        self.mean_data = mean_data  # mean directional vector to subtract, flattened per frame
        self.data = []
        if is_train:
            subjects = train_subject
        else:
            subjects = test_subject
        # loading data and normalize; windows start every 10 frames and take
        # n_poses frames at a temporal stride of 2
        frame_stride = 2
        data = np.load(path, allow_pickle=True)['positions_3d'].item()
        for subject, actions in data.items():
            if subject not in subjects:
                continue
            for action_name, positions in actions.items():
                positions = positions[:, target_joints]
                positions = self.normalize(positions)
                for f in range(0, len(positions), 10):
                    if f+n_poses*frame_stride > len(positions):
                        break
                    self.data.append(positions[f:f+n_poses*frame_stride:frame_stride])

    def __getitem__(self, index):
        """Return (poses, dir_vec) where dir_vec is flattened and mean-subtracted."""
        poses = self.data[index]
        # round-trip through directional vectors to enforce unit bone lengths
        dir_vec = convert_pose_seq_to_dir_vec(poses)
        poses = convert_dir_vec_to_pose(dir_vec)
        if self.augment:  # data augmentation by adding gaussian noises on joints coordinates
            rand_val = random.random()
            if rand_val < 0.2:
                poses = poses.copy()
                poses += np.random.normal(0, 0.002 ** 0.5, poses.shape)
            else:
                poses = poses.copy()
                poses += np.random.normal(0, 0.0001 ** 0.5, poses.shape)
        dir_vec = convert_pose_seq_to_dir_vec(poses)
        dir_vec = dir_vec.reshape(dir_vec.shape[0], -1)
        dir_vec = dir_vec - self.mean_data
        poses = torch.from_numpy(poses).float()
        dir_vec = torch.from_numpy(dir_vec).float()
        return poses, dir_vec

    def __len__(self):
        return len(self.data)

    def normalize(self, data):
        """Hip-center, convert to y-up, frontalize on the hip axis, drop hip joints."""
        # pose normalization: subtract a reference joint, swap y/z, invert y
        for f in range(data.shape[0]):
            data[f, :] -= data[f, 2]
            data[f, :, (0, 1, 2)] = data[f, :, (0, 2, 1)]  # xy exchange
            data[f, :, 1] = -data[f, :, 1]  # invert y
        # frontalize based on hip joints: rotate about the y axis so the hip
        # vector faces a canonical direction
        for f in range(data.shape[0]):
            hip_vec = data[f, 1] - data[f, 0]
            angle = np.pi - np.math.atan2(hip_vec[2], hip_vec[0])  # angles on XZ plane
            if 180 > np.rad2deg(angle) > 0:
                pass
            elif 180 < np.rad2deg(angle) < 360:
                angle = angle - np.deg2rad(360)
            rot = self.rotation_matrix([0, 1, 0], angle)
            data[f] = np.matmul(data[f], rot)
        data = data[:, 2:]  # exclude hip joints
        return data

    @staticmethod
    def rotation_matrix(axis, theta):
        """
        Return the rotation matrix associated with counterclockwise rotation about
        the given axis by theta radians.
        """
        # Euler-Rodrigues formula
        axis = np.asarray(axis)
        axis = axis / math.sqrt(np.dot(axis, axis))
        a = math.cos(theta / 2.0)
        b, c, d = -axis * math.sin(theta / 2.0)
        aa, bb, cc, dd = a * a, b * b, c * c, d * d
        bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
        return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                         [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                         [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
| 3,853 | 34.685185 | 134 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/data_loader/lmdb_data_loader.py
|
import datetime
import logging
import os
import pickle
import random
import numpy as np
import lmdb as lmdb
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
import utils.train_utils
import utils.data_utils
from model.vocab import Vocab
from data_loader.data_preprocessor import DataPreprocessor
import pyarrow
def word_seq_collate_fn(data):
    """Collate a batch whose word sequences have variable lengths.

    Sorts the batch longest-first (as required by pack_padded_sequence),
    pads the word sequences, and stacks every other field with the default
    collate.
    """
    # longest-first ordering so downstream RNNs can pack the padded batch
    data.sort(key=lambda sample: len(sample[0]), reverse=True)

    words, padded_text, poses, vecs, audios, spects, infos = zip(*data)

    lengths = torch.LongTensor([len(seq) for seq in words])
    words = pad_sequence(words, batch_first=True).long()

    batched = [default_collate(field) for field in (padded_text, poses, vecs, audios, spects)]
    infos = {key: default_collate([info[key] for info in infos]) for key in infos[0]}

    return (words, lengths, *batched, infos)
def default_collate_fn(data):
    """Collate a batch while ignoring the raw word-sequence field.

    The first two slots are dummy tensors so the returned tuple stays
    shape-compatible with word_seq_collate_fn's
    (word_seq, words_lengths, ...) layout.
    """
    _, padded_text, poses, vecs, audios, spects, infos = zip(*data)

    batched = [default_collate(field) for field in (padded_text, poses, vecs, audios, spects)]
    infos = {key: default_collate([info[key] for info in infos]) for key in infos[0]}

    return (torch.tensor([0]), torch.tensor([0]), *batched, infos)
class SpeechMotionDataset(Dataset):
    """TED gesture dataset backed by a preprocessed LMDB sample cache.

    Each item is (word_seq, extended_word_seq, pose_seq, dir_vec_seq, audio,
    spectrogram, aux_info).  The cache is built on first use via
    DataPreprocessor; a speaker-id Vocab is built or loaded as well.
    """
    def __init__(self, lmdb_dir, n_poses, subdivision_stride, pose_resampling_fps, mean_pose, mean_dir_vec,
                 speaker_model=None, remove_word_timing=False):
        self.lmdb_dir = lmdb_dir
        self.n_poses = n_poses
        self.subdivision_stride = subdivision_stride
        self.skeleton_resampling_fps = pose_resampling_fps
        self.mean_dir_vec = mean_dir_vec
        self.remove_word_timing = remove_word_timing
        # expected per-sample audio lengths; raw audio is assumed 16 kHz
        self.expected_audio_length = int(round(n_poses / pose_resampling_fps * 16000))
        self.expected_spectrogram_length = utils.data_utils.calc_spectrogram_length_from_motion_length(
            n_poses, pose_resampling_fps)
        self.lang_model = None  # assigned later via set_lang_model()
        logging.info("Reading data '{}'...".format(lmdb_dir))
        preloaded_dir = lmdb_dir + '_cache'
        if not os.path.exists(preloaded_dir):
            # build the sample cache once; cached samples are ~25% longer than
            # n_poses so __getitem__ can clip to the exact length
            logging.info('Creating the dataset cache...')
            assert mean_dir_vec is not None
            if mean_dir_vec.shape[-1] != 3:
                mean_dir_vec = mean_dir_vec.reshape(mean_dir_vec.shape[:-1] + (-1, 3))
            n_poses_extended = int(round(n_poses * 1.25))  # some margin
            data_sampler = DataPreprocessor(lmdb_dir, preloaded_dir, n_poses_extended,
                                            subdivision_stride, pose_resampling_fps, mean_pose, mean_dir_vec)
            data_sampler.run()
        else:
            logging.info('Found the cache {}'.format(preloaded_dir))
        # init lmdb
        self.lmdb_env = lmdb.open(preloaded_dir, readonly=True, lock=False)
        with self.lmdb_env.begin() as txn:
            self.n_samples = txn.stat()['entries']
        # make a speaker model (video-id vocabulary); reuse the one passed in
        # (e.g. from the train split) when available
        if speaker_model is None or speaker_model == 0:
            precomputed_model = lmdb_dir + '_speaker_model.pkl'
            if not os.path.exists(precomputed_model):
                self._make_speaker_model(lmdb_dir, precomputed_model)
            else:
                with open(precomputed_model, 'rb') as f:
                    self.speaker_model = pickle.load(f)
        else:
            self.speaker_model = speaker_model

    def __len__(self):
        return self.n_samples

    def __getitem__(self, idx):
        """Load sample `idx` from LMDB and convert every field to tensors."""
        with self.lmdb_env.begin(write=False) as txn:
            key = '{:010}'.format(idx).encode('ascii')
            sample = txn.get(key)
            sample = pyarrow.deserialize(sample)
            word_seq, pose_seq, vec_seq, audio, spectrogram, aux_info = sample

        def extend_word_seq(lang, words, end_time=None):
            # build a frame-aligned word-index sequence of length n_poses;
            # index 0 is the padding token
            n_frames = self.n_poses
            if end_time is None:
                end_time = aux_info['end_time']
            frame_duration = (end_time - aux_info['start_time']) / n_frames
            extended_word_indices = np.zeros(n_frames)  # zero is the index of padding token
            if self.remove_word_timing:
                # discard real timings: spread the words evenly over the frames
                n_words = 0
                for word in words:
                    idx = max(0, int(np.floor((word[1] - aux_info['start_time']) / frame_duration)))
                    if idx < n_frames:
                        n_words += 1
                space = int(n_frames / (n_words + 1))
                for i in range(n_words):
                    idx = (i+1) * space
                    extended_word_indices[idx] = lang.get_word_index(words[i][0])
            else:
                # place each word at the frame of its start time
                prev_idx = 0
                for word in words:
                    idx = max(0, int(np.floor((word[1] - aux_info['start_time']) / frame_duration)))
                    if idx < n_frames:
                        extended_word_indices[idx] = lang.get_word_index(word[0])
                        # extended_word_indices[prev_idx:idx+1] = lang.get_word_index(word[0])
                        prev_idx = idx
            return torch.Tensor(extended_word_indices).long()

        def words_to_tensor(lang, words, end_time=None):
            # SOS + word indices (up to end_time, when given) + EOS
            indexes = [lang.SOS_token]
            for word in words:
                if end_time is not None and word[1] > end_time:
                    break
                indexes.append(lang.get_word_index(word[0]))
            indexes.append(lang.EOS_token)
            return torch.Tensor(indexes).long()

        duration = aux_info['end_time'] - aux_info['start_time']
        # cached samples are longer than n_poses (see __init__); clip them
        do_clipping = True
        if do_clipping:
            sample_end_time = aux_info['start_time'] + duration * self.n_poses / vec_seq.shape[0]
            audio = utils.data_utils.make_audio_fixed_length(audio, self.expected_audio_length)
            spectrogram = spectrogram[:, 0:self.expected_spectrogram_length]
            vec_seq = vec_seq[0:self.n_poses]
            pose_seq = pose_seq[0:self.n_poses]
        else:
            sample_end_time = None

        # to tensors
        word_seq_tensor = words_to_tensor(self.lang_model, word_seq, sample_end_time)
        extended_word_seq = extend_word_seq(self.lang_model, word_seq, sample_end_time)
        vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float()
        pose_seq = torch.from_numpy(pose_seq).reshape((pose_seq.shape[0], -1)).float()
        audio = torch.from_numpy(audio).float()
        spectrogram = torch.from_numpy(spectrogram)
        return word_seq_tensor, extended_word_seq, pose_seq, vec_seq, audio, spectrogram, aux_info

    def set_lang_model(self, lang_model):
        # the word vocabulary is shared across splits, so it is injected
        self.lang_model = lang_model

    def _make_speaker_model(self, lmdb_dir, cache_path):
        """Index every video id into a Vocab and pickle it to cache_path."""
        logging.info('  building a speaker model...')
        speaker_model = Vocab('vid', insert_default_tokens=False)
        lmdb_env = lmdb.open(lmdb_dir, readonly=True, lock=False)
        txn = lmdb_env.begin(write=False)
        cursor = txn.cursor()
        for key, value in cursor:
            video = pyarrow.deserialize(value)
            vid = video['vid']
            speaker_model.index_word(vid)
        lmdb_env.close()
        logging.info('    indexed %d videos' % speaker_model.n_words)
        self.speaker_model = speaker_model
        # cache
        with open(cache_path, 'wb') as f:
            pickle.dump(self.speaker_model, f)
| 8,022 | 39.933673 | 109 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/train_eval/train_speech2gesture.py
|
import torch
import torch.nn.functional as F
def train_iter_speech2gesture(args, in_spec, target_poses, pose_decoder, discriminator,
                              pose_dec_optim, dis_optim, loss_fn):
    """One training step of the Speech2Gesture baseline (LSGAN objective).

    Updates the discriminator on real/fake motion, then the generator on a
    weighted regression + adversarial loss.  Returns the weighted loss values
    for logging.
    """
    # generate poses conditioned on the spectrogram and a few seed poses
    seed_poses = target_poses[:, 0:args.n_pre_poses]
    out_poses = pose_decoder(in_spec, seed_poses)

    # the discriminator operates on frame-to-frame motion, not absolute poses
    real_motion = target_poses[:, 1:] - target_poses[:, :-1]
    fake_motion = out_poses[:, 1:] - out_poses[:, :-1]

    ###########################################################################################
    # train D (least-squares GAN)
    dis_optim.zero_grad()
    score_real = discriminator(real_motion)
    score_fake = discriminator(fake_motion.detach())
    dis_error = F.mse_loss(torch.ones_like(score_real), score_real) \
        + F.mse_loss(torch.zeros_like(score_fake), score_fake)
    dis_error.backward()
    dis_optim.step()

    ###########################################################################################
    # train G: regression loss plus adversarial term
    pose_dec_optim.zero_grad()
    l1_loss = loss_fn(out_poses, target_poses)
    score_gen = discriminator(fake_motion)
    gen_error = F.mse_loss(torch.ones_like(score_gen), score_gen)
    loss = args.loss_regression_weight * l1_loss + args.loss_gan_weight * gen_error
    loss.backward()
    pose_dec_optim.step()

    return {'loss': args.loss_regression_weight * l1_loss.item(),
            'gen': args.loss_gan_weight * gen_error.item(),
            'dis': dis_error.item()}
| 1,430 | 36.657895 | 114 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/train_eval/train_joint_embed.py
|
import torch
import torch.nn.functional as F
def train_iter_embed(args, epoch, in_text, in_audio, target_data, net, optim, mode=None):
    """One training step of the joint-embedding network.

    Both configuration branches currently disable variational encoding, so the
    model trains as a plain autoencoder; the KLD path is kept for the VAE
    configuration.  Returns {'loss': recon_loss} (unweighted), plus 'KLD' when
    variational encoding is on.
    """
    seed_seq = target_data[:, 0:args.n_pre_poses]
    optim.zero_grad()

    # mode == 'random': joint embed model (AE); otherwise FGD feature
    # extractor (VAE or AE) -- both branches resolve to False at the moment
    variational_encoding = False

    # forward; only the reconstruction and the latent statistics are used here
    (context_feat, context_mu, context_logvar,
     poses_feat, pose_mu, pose_logvar,
     recon_data) = net(in_text, in_audio, seed_seq, target_data, mode,
                       variational_encoding=variational_encoding)

    # per-sample mean L1 reconstruction error, summed over the batch
    per_sample = torch.mean(F.l1_loss(recon_data, target_data, reduction='none'), dim=(1, 2))

    use_pose_diff = False  # disabled: extra L1 on frame-to-frame differences
    if use_pose_diff:
        target_diff = target_data[:, 1:] - target_data[:, :-1]
        recon_diff = recon_data[:, 1:] - recon_data[:, :-1]
        per_sample += torch.mean(F.l1_loss(recon_diff, target_diff, reduction='none'), dim=(1, 2))
    recon_loss = torch.sum(per_sample)

    if variational_encoding:
        # KL divergence of whichever side is being trained
        if net.mode == 'speech':
            KLD = -0.5 * torch.sum(1 + context_logvar - context_mu.pow(2) - context_logvar.exp())
        else:
            KLD = -0.5 * torch.sum(1 + pose_logvar - pose_mu.pow(2) - pose_logvar.exp())

        # warm up the KLD weight after epoch 10
        KLD_weight = 0 if epoch < 10 else min(1.0, (epoch - 10) * args.loss_kld_weight)
        loss = args.loss_regression_weight * recon_loss + KLD_weight * KLD
    else:
        loss = recon_loss

    loss.backward()
    optim.step()

    ret_dict = {'loss': recon_loss.item()}
    if variational_encoding:
        ret_dict['KLD'] = KLD.item()
    return ret_dict
def eval_embed(in_text, in_audio, pre_poses, target_poses, net, mode=None):
    """Evaluate reconstruction quality: batch-mean L1 between reconstruction
    and target.  Returns (loss, reconstructed_poses)."""
    (context_feat, context_mu, context_logvar,
     poses_feat, pose_mu, pose_logvar,
     recon_poses) = net(in_text, in_audio, pre_poses, target_poses, mode,
                        variational_encoding=False)

    per_sample = torch.mean(F.l1_loss(recon_poses, target_poses, reduction='none'), dim=(1, 2))
    loss = torch.mean(per_sample)
    return loss, recon_poses
| 2,191 | 33.793651 | 101 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/train_eval/train_gan.py
|
import random
import numpy as np
import torch
import torch.nn.functional as F
def add_noise(data):
    """Return `data` perturbed by zero-mean Gaussian noise with std 0.1."""
    return data + torch.randn_like(data) * 0.1
def train_iter_gan(args, epoch, in_text, in_audio, target_poses, vid_indices,
                   pose_decoder, discriminator,
                   pose_dec_optim, dis_optim):
    """One adversarial training step of the multimodal-context model.

    During the warm-up epochs only the regression loss is used; afterwards the
    discriminator is updated first, then the generator with regression + GAN
    (+ KLD and diversity regularization when a latent z is used).  Returns a
    dict of weighted loss values for logging.
    """
    warm_up_epochs = args.loss_warmup
    use_noisy_target = False

    # make pre seq input: the first n_pre_poses frames are given as
    # constraints, with an extra last channel acting as an indicator bit
    pre_seq = target_poses.new_zeros((target_poses.shape[0], target_poses.shape[1], target_poses.shape[2] + 1))
    pre_seq[:, 0:args.n_pre_poses, :-1] = target_poses[:, 0:args.n_pre_poses]
    pre_seq[:, 0:args.n_pre_poses, -1] = 1  # indicating bit for constraints

    ###########################################################################################
    # train D (skipped during warm-up or when the GAN loss is disabled)
    dis_error = None
    if epoch > warm_up_epochs and args.loss_gan_weight > 0.0:
        dis_optim.zero_grad()
        out_dir_vec, *_ = pose_decoder(pre_seq, in_text, in_audio, vid_indices)  # out shape (batch x seq x dim)
        if use_noisy_target:
            # optional instance noise on both real and fake inputs
            noise_target = add_noise(target_poses)
            noise_out = add_noise(out_dir_vec.detach())
            dis_real = discriminator(noise_target, in_text)
            dis_fake = discriminator(noise_out, in_text)
        else:
            dis_real = discriminator(target_poses, in_text)
            dis_fake = discriminator(out_dir_vec.detach(), in_text)
        dis_error = torch.sum(-torch.mean(torch.log(dis_real + 1e-8) + torch.log(1 - dis_fake + 1e-8)))  # ns-gan
        dis_error.backward()
        dis_optim.step()

    ###########################################################################################
    # train G
    pose_dec_optim.zero_grad()
    # decoding
    out_dir_vec, z, z_mu, z_logvar = pose_decoder(pre_seq, in_text, in_audio, vid_indices)
    # loss: scaled Huber regression plus non-saturating GAN term
    beta = 0.1
    huber_loss = F.smooth_l1_loss(out_dir_vec / beta, target_poses / beta) * beta
    dis_output = discriminator(out_dir_vec, in_text)
    gen_error = -torch.mean(torch.log(dis_output + 1e-8))
    kld = div_reg = None
    if (args.z_type == 'speaker' or args.z_type == 'random') and args.loss_reg_weight > 0.0:
        if args.z_type == 'speaker':
            # enforcing divergent gestures btw original vid and other vid
            rand_idx = torch.randperm(vid_indices.shape[0])
            rand_vids = vid_indices[rand_idx]
        else:
            rand_vids = None
        # second decoding with shuffled/random speaker ids; its gradient is cut
        out_dir_vec_rand_vid, z_rand_vid, _, _ = pose_decoder(pre_seq, in_text, in_audio, rand_vids)
        beta = 0.05
        pose_l1 = F.smooth_l1_loss(out_dir_vec / beta, out_dir_vec_rand_vid.detach() / beta, reduction='none') * beta
        pose_l1 = pose_l1.sum(dim=1).sum(dim=1)
        pose_l1 = pose_l1.view(pose_l1.shape[0], -1).mean(1)
        z_l1 = F.l1_loss(z.detach(), z_rand_vid.detach(), reduction='none')
        z_l1 = z_l1.view(z_l1.shape[0], -1).mean(1)
        # reward output diversity relative to latent distance, clamped for stability
        div_reg = -(pose_l1 / (z_l1 + 1.0e-5))
        div_reg = torch.clamp(div_reg, min=-1000)
        div_reg = div_reg.mean()
        if args.z_type == 'speaker':
            # speaker embedding KLD
            kld = -0.5 * torch.mean(1 + z_logvar - z_mu.pow(2) - z_logvar.exp())
            loss = args.loss_regression_weight * huber_loss + args.loss_kld_weight * kld + args.loss_reg_weight * div_reg
        else:
            loss = args.loss_regression_weight * huber_loss + args.loss_reg_weight * div_reg
    else:
        loss = args.loss_regression_weight * huber_loss  #+ var_loss
    if epoch > warm_up_epochs:
        loss += args.loss_gan_weight * gen_error
    loss.backward()
    pose_dec_optim.step()

    # report each active term, pre-weighted for direct comparison
    ret_dict = {'loss': args.loss_regression_weight * huber_loss.item()}
    if kld:
        ret_dict['KLD'] = args.loss_kld_weight * kld.item()
    if div_reg:
        ret_dict['DIV_REG'] = args.loss_reg_weight * div_reg.item()
    if epoch > warm_up_epochs and args.loss_gan_weight > 0.0:
        ret_dict['gen'] = args.loss_gan_weight * gen_error.item()
        ret_dict['dis'] = dis_error.item()
    return ret_dict
| 4,074 | 37.809524 | 121 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/train_eval/train_seq2seq.py
|
import logging
import torch
import torch.nn.functional as F
loss_i = 0  # module-level call counter used to throttle debug logging


def custom_loss(output, target, args, epoch):
    """Weighted sum of MSE, motion-continuity, and negative motion-variance terms.

    Note: despite the historical 'mae' naming, the regression term is MSE.
    `epoch` is currently unused; kept for interface compatibility.
    """
    n_element = output.numel()

    # regression term
    mse_loss = F.mse_loss(output, target) * args.loss_regression_weight

    # continuity: penalize large frame-to-frame jumps
    frame_deltas = [abs(output[:, n, :] - output[:, n - 1, :]) for n in range(1, output.shape[1])]
    cont_loss = torch.sum(torch.stack(frame_deltas)) / n_element * args.loss_kld_weight

    # variance: reward energetic motion via a negative norm term
    norm = torch.norm(output, 2, 1)  # output shape (batch, seq, dim)
    var_loss = -torch.sum(norm) / n_element * args.loss_reg_weight

    loss = mse_loss + cont_loss + var_loss

    # debugging code: log the individual terms every 1001st call
    global loss_i
    if loss_i == 1000:
        logging.debug('(custom loss) mse %.5f, cont %.5f, var %.5f'
                      % (mse_loss.item(), cont_loss.item(), var_loss.item()))
        loss_i = 0
    loss_i += 1

    return loss
def train_iter_seq2seq(args, epoch, in_text, in_lengths, target_poses, net, optim):
    """Run one seq2seq optimization step and return {'loss': value}."""
    optim.zero_grad()

    # forward pass (no speaker ids for this model)
    predicted_poses = net(in_text, in_lengths, target_poses, None)

    # backward pass with gradient clipping to stabilize the GRU training
    loss = custom_loss(predicted_poses, target_poses, args, epoch)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(net.parameters(), 5)
    optim.step()

    return {'loss': loss.item()}
| 1,354 | 25.057692 | 88 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/utils/tts_helper.py
|
import datetime
import os
import time
from google.cloud import texttospeech
from pygame import mixer
class TTSHelper:
    """Google Cloud TTS wrapper with a local wav cache and pygame playback.

    The GOOGLE_APPLICATION_CREDENTIALS environment variable must point to a
    service-account JSON key file before instantiating this class.
    """
    cache_folder = './cached_wav/'

    def __init__(self, cache_path=None):
        if cache_path is not None:
            self.cache_folder = cache_path

        # make sure the cache folder exists
        try:
            os.makedirs(self.cache_folder)
        except OSError:
            pass  # already present

        # TTS client and the two voice presets used by synthesis()
        self.client = texttospeech.TextToSpeechClient()
        self.voice_en_female = texttospeech.types.VoiceSelectionParams(
            language_code='en-US', name='en-US-Wavenet-F')
        self.voice_en_male = texttospeech.types.VoiceSelectionParams(
            language_code='en-US', name='en-US-Wavenet-D')
        self.audio_config_en = texttospeech.types.AudioConfig(
            speaking_rate=1.0,
            audio_encoding=texttospeech.enums.AudioEncoding.LINEAR16)

        # audio playback backend
        mixer.init()

        self._cleanup_cachefolder()

    def _cleanup_cachefolder(self):
        """Delete cached wav files that were not accessed for 30 days."""
        expiry = datetime.timedelta(days=30)
        for dirpath, _dirnames, filenames in os.walk(self.cache_folder):
            for name in filenames:
                full_path = os.path.join(dirpath, name)
                last_access = datetime.datetime.fromtimestamp(os.path.getatime(full_path))
                if datetime.datetime.now() - last_access > expiry:
                    os.remove(full_path)

    def _string2numeric_hash(self, text):
        import hashlib
        # first 16 hex digits of the md5 digest, as an int (cache key)
        return int(hashlib.md5(text.encode('utf-8')).hexdigest()[:16], 16)

    def synthesis(self, ssml_text, voice_name='en-female', verbose=False):
        """Synthesize ssml_text and return the path of the resulting wav (cached)."""
        if not ssml_text.startswith(u'<speak>'):
            ssml_text = u'<speak>' + ssml_text + u'</speak>'

        cache_key = self._string2numeric_hash(voice_name + ssml_text)
        filename = os.path.join(self.cache_folder, str(cache_key) + '.wav')

        # cache hit: reuse the existing file
        if os.path.exists(filename):
            if verbose:
                print('use the cached wav "{}"'.format(filename))
            return filename

        if verbose:
            start = time.time()

        if voice_name == 'en-female':
            voice, audio_config = self.voice_en_female, self.audio_config_en
        elif voice_name == 'en-male':
            voice, audio_config = self.voice_en_male, self.audio_config_en
        else:
            raise ValueError

        synthesis_input = texttospeech.types.SynthesisInput(ssml=ssml_text)
        response = self.client.synthesize_speech(synthesis_input, voice, audio_config)

        if verbose:
            print('TTS took {0:.2f} seconds'.format(time.time() - start))
            start = time.time()

        with open(filename, 'wb') as out:
            out.write(response.audio_content)
        if verbose:
            print('written to a file "{}"'.format(filename))

        return filename

    def get_sound_obj(self, filename):
        """Load a wav and return (pygame Sound, duration in seconds)."""
        snd = mixer.Sound(filename)
        return snd, snd.get_length()

    def play(self, sound):
        """Play a pygame Sound once."""
        sound.play(loops=0)
| 3,522 | 32.552381 | 115 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/utils/average_meter.py
|
class AverageMeter(object):
    """Tracks the latest value and running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
| 597 | 23.916667 | 71 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/utils/data_utils.py
|
import re
import librosa
import numpy as np
import torch
from scipy.interpolate import interp1d
from sklearn.preprocessing import normalize
# default compute device for helpers in this module
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# (parent joint, child joint, matplotlib color) pairs for 2-D skeleton drawing
skeleton_line_pairs = [(0, 1, 'b'), (1, 2, 'darkred'), (2, 3, 'r'), (3, 4, 'orange'), (1, 5, 'darkgreen'),
                       (5, 6, 'limegreen'), (6, 7, 'darkseagreen')]
# (parent joint, child joint, bone length) triples defining the 10-joint upper-body skeleton
dir_vec_pairs = [(0, 1, 0.26), (1, 2, 0.18), (2, 3, 0.14), (1, 4, 0.22), (4, 5, 0.36),
                 (5, 6, 0.33), (1, 7, 0.22), (7, 8, 0.36), (8, 9, 0.33)]  # adjacency and bone length
def normalize_string(s):
    """Lowercase, trim, and keep only letters plus , . ! ? (space-separated)."""
    steps = (
        (r"([,.!?])", r" \1 "),       # isolate punctuation marks
        (r"(['])", r""),              # remove apostrophes
        (r"[^a-zA-Z,.!?]+", r" "),    # other characters become whitespace
        (r"\s+", r" "),               # collapse repeated whitespace
    )
    text = s.lower().strip()
    for pattern, repl in steps:
        text = re.sub(pattern, repl, text)
    return text.strip()
def remove_tags_marks(text):
    """Strip HTML-like tags and common punctuation runs from text."""
    pattern = re.compile('<.*?>|[.,:;!?]+')
    return re.sub(pattern, '', text)
def extract_melspectrogram(y, sr=16000):
    """Compute a log-scaled mel spectrogram (mels x time) as float16."""
    power_spec = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024, hop_length=512, power=2)
    log_spec = librosa.power_to_db(power_spec, ref=np.max)
    return log_spec.astype('float16')
def calc_spectrogram_length_from_motion_length(n_frames, fps):
    """Spectrogram frame count matching `n_frames` of motion at `fps`.

    Assumes 16 kHz audio analyzed with n_fft=1024 and hop_length=512
    (must agree with extract_melspectrogram).
    """
    n_samples = n_frames / fps * 16000
    return int(round((n_samples - 1024) / 512 + 1))
def resample_pose_seq(poses, duration_in_sec, fps):
    """Linearly resample a pose sequence to `fps` frames per second."""
    n_src = len(poses)
    sample_fn = interp1d(np.arange(0, n_src), poses, axis=0, kind='linear',
                         fill_value='extrapolate')
    n_dst = duration_in_sec * fps
    resampled = sample_fn(np.arange(0, n_src, n_src / n_dst))
    if hasattr(poses, 'dtype'):
        # keep the caller's dtype (interp1d promotes to float64)
        resampled = resampled.astype(poses.dtype)
    return resampled
def time_stretch_for_words(words, start_time, speech_speed_rate):
    """Rescale word timings after `start_time` by 1/speech_speed_rate (in place).

    Each word entry is [token, start, end]; only words that begin after
    start_time are stretched.
    """
    for word in words:
        if word[1] > start_time:
            word[1] = start_time + (word[1] - start_time) / speech_speed_rate
            word[2] = start_time + (word[2] - start_time) / speech_speed_rate
    return words
def make_audio_fixed_length(audio, expected_audio_length):
    """Pad (symmetric reflection) or truncate a 1-D signal to the given length."""
    shortage = expected_audio_length - len(audio)
    if shortage > 0:
        return np.pad(audio, (0, shortage), mode='symmetric')
    return audio[0:expected_audio_length]
def convert_dir_vec_to_pose(vec):
    """Integrate unit direction vectors (scaled by bone length) into 10-joint positions.

    Accepts (9, 3), (seq, 9, 3) or (batch, seq, 9, 3); a flattened last axis
    is reshaped to (..., 9, 3) first. The root joint stays at the origin.
    """
    vec = np.array(vec)
    if vec.shape[-1] != 3:
        vec = vec.reshape(vec.shape[:-1] + (-1, 3))
    assert vec.ndim in (2, 3, 4)
    joint_pos = np.zeros(vec.shape[:-2] + (10, 3))
    # each child joint hangs off its parent at the bone's length along the direction
    for j, (parent, child, length) in enumerate(dir_vec_pairs):
        joint_pos[..., child, :] = joint_pos[..., parent, :] + length * vec[..., j, :]
    return joint_pos
def convert_pose_seq_to_dir_vec(pose):
    """Convert joint positions into per-bone unit direction vectors.

    Accepts (seq, joints, 3) or (batch, seq, joints, 3); a flattened last
    axis is reshaped to (..., joints, 3) first.
    """
    if pose.shape[-1] != 3:
        pose = pose.reshape(pose.shape[:-1] + (-1, 3))

    if pose.ndim == 3:  # (seq, joints, 3)
        dir_vec = np.zeros((pose.shape[0], len(dir_vec_pairs), 3))
        for i, (parent, child, _) in enumerate(dir_vec_pairs):
            dir_vec[:, i] = pose[:, child] - pose[:, parent]
            dir_vec[:, i, :] = normalize(dir_vec[:, i, :], axis=1)  # to unit length
    elif pose.ndim == 4:  # (batch, seq, joints, 3)
        dir_vec = np.zeros((pose.shape[0], pose.shape[1], len(dir_vec_pairs), 3))
        for i, (parent, child, _) in enumerate(dir_vec_pairs):
            dir_vec[:, :, i] = pose[:, :, child] - pose[:, :, parent]
        for b in range(dir_vec.shape[0]):  # normalize per batch element
            for i in range(len(dir_vec_pairs)):
                dir_vec[b, :, i, :] = normalize(dir_vec[b, :, i, :], axis=1)  # to unit length
    else:
        assert False
    return dir_vec
| 4,295 | 34.504132 | 106 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/utils/vocab_utils.py
|
import logging
import os
import pickle
import lmdb
import pyarrow
from model.vocab import Vocab
def build_vocab(name, dataset_list, cache_path, word_vec_path=None, feat_dim=None):
    """Build (or load from cache) a Vocab covering the words of the given datasets.

    When cache_path exists, the pickled vocab is loaded and its embedding
    weights are validated against word_vec_path; otherwise the vocab is built
    from the datasets and cached.
    """
    logging.info('  building a language model...')

    if os.path.exists(cache_path):
        logging.info('    loaded from {}'.format(cache_path))
        with open(cache_path, 'rb') as f:
            lang_model = pickle.load(f)

        if word_vec_path is None:
            lang_model.word_embedding_weights = None
        elif lang_model.word_embedding_weights.shape[0] != lang_model.n_words:
            logging.warning('    failed to load word embedding weights. check this')
            assert False
        return lang_model

    # no cache: index every dataset's words into a fresh vocab
    lang_model = Vocab(name)
    for dataset in dataset_list:
        logging.info('    indexing words from {}'.format(dataset.lmdb_dir))
        index_words(lang_model, dataset.lmdb_dir)

    if word_vec_path is not None:
        lang_model.load_word_vectors(word_vec_path, feat_dim)

    with open(cache_path, 'wb') as f:
        pickle.dump(lang_model, f)
    return lang_model
def index_words(lang_model, lmdb_dir):
    """Register every transcript word in an LMDB dataset with the vocab."""
    env = lmdb.open(lmdb_dir, readonly=True, lock=False)
    txn = env.begin(write=False)
    # NOTE(review): pyarrow.deserialize is deprecated in recent pyarrow releases
    for _key, buf in txn.cursor():
        video = pyarrow.deserialize(buf)
        for clip in video['clips']:
            for word_info in clip['words']:
                lang_model.index_word(word_info[0])
    env.close()
    logging.info('    indexed %d words' % lang_model.n_words)
| 1,678 | 27.948276 | 84 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/utils/train_utils.py
|
import logging
import os
import pickle
import random
import subprocess
from collections import defaultdict, namedtuple
from logging.handlers import RotatingFileHandler
from textwrap import wrap
import numpy as np
import re
import time
import math
import soundfile as sf
import librosa.display
import matplotlib
import matplotlib.pyplot as plt
import torch
import matplotlib.ticker as ticker
import matplotlib.animation as animation
from mpl_toolkits import mplot3d
import utils.data_utils
import train
import data_loader.lmdb_data_loader
# only for unicode characters, you may remove these two lines
from model import vocab
matplotlib.rcParams['axes.unicode_minus'] = False
def set_logger(log_path=None, log_filename='log'):
    """Route DEBUG+ logs to stdout and, optionally, a rotating file under log_path."""
    # basicConfig is a no-op when the root logger already has handlers,
    # so drop any existing ones first
    for old_handler in list(logging.root.handlers):
        logging.root.removeHandler(old_handler)

    sinks = [logging.StreamHandler()]
    if log_path is not None:
        os.makedirs(log_path, exist_ok=True)
        sinks.append(RotatingFileHandler(os.path.join(log_path, log_filename),
                                         maxBytes=10 * 1024 * 1024, backupCount=5))
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s: %(message)s', handlers=sinks)
    logging.getLogger("matplotlib").setLevel(logging.WARNING)  # silence matplotlib chatter
def as_minutes(s):
    """Format a duration in seconds as 'Xm Ys' (seconds truncated)."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def time_since(since):
    """Elapsed wall-clock time since `since` (a time.time() value), as 'Xm Ys'."""
    elapsed = time.time() - since
    return '%s' % as_minutes(elapsed)
def create_video_and_save(save_path, epoch, prefix, iter_idx, target, output, mean_data, title,
                          audio=None, aux_str=None, clipping_to_shortest_stream=False, delete_audio_file=True):
    """Render side-by-side 3-D skeleton animations ('human' vs 'generated') to an mp4.

    :param target: ground-truth direction vectors (may be None to render only the output)
    :param output: generated direction vectors
    :param mean_data: mean direction vectors subtracted during normalization (re-added here)
    :param audio: optional 1-channel raw signal, assumed 16 kHz, muxed in via ffmpeg
    :param aux_str: extra text appended to the figure title
    :returns: (output_poses, target_poses) as joint positions
    """
    print('rendering a video...')
    start = time.time()

    fig = plt.figure(figsize=(8, 4))
    axes = [fig.add_subplot(1, 2, 1, projection='3d'), fig.add_subplot(1, 2, 2, projection='3d')]
    axes[0].view_init(elev=20, azim=-60)
    axes[1].view_init(elev=20, azim=-60)
    fig_title = title

    if aux_str:
        fig_title += ('\n' + aux_str)
    fig.suptitle('\n'.join(wrap(fig_title, 75)), fontsize='medium')

    # un-normalization and convert to poses
    mean_data = mean_data.flatten()
    output = output + mean_data
    output_poses = utils.data_utils.convert_dir_vec_to_pose(output)
    target_poses = None
    if target is not None:
        target = target + mean_data
        target_poses = utils.data_utils.convert_dir_vec_to_pose(target)

    def animate(i):
        # draw frame i of both skeletons (left: ground truth, right: generated)
        for k, name in enumerate(['human', 'generated']):
            if name == 'human' and target is not None and i < len(target):
                pose = target_poses[i]
            elif name == 'generated' and i < len(output):
                pose = output_poses[i]
            else:
                pose = None  # this stream has no frame i; leave the axis as-is

            if pose is not None:
                axes[k].clear()
                for j, pair in enumerate(utils.data_utils.dir_vec_pairs):
                    # note the y/z swap: data is y-up, matplotlib's 3-D axes are z-up
                    axes[k].plot([pose[pair[0], 0], pose[pair[1], 0]],
                                 [pose[pair[0], 2], pose[pair[1], 2]],
                                 [pose[pair[0], 1], pose[pair[1], 1]],
                                 zdir='z', linewidth=5)
                axes[k].set_xlim3d(-0.5, 0.5)
                axes[k].set_ylim3d(0.5, -0.5)
                axes[k].set_zlim3d(0.5, -0.5)
                axes[k].set_xlabel('x')
                axes[k].set_ylabel('z')
                axes[k].set_zlabel('y')
                axes[k].set_title('{} ({}/{})'.format(name, i + 1, len(output)))

    if target is not None:
        num_frames = max(len(target), len(output))
    else:
        num_frames = len(output)
    ani = animation.FuncAnimation(fig, animate, interval=30, frames=num_frames, repeat=False)

    # show audio
    audio_path = None
    if audio is not None:
        assert len(audio.shape) == 1  # 1-channel, raw signal
        audio = audio.astype(np.float32)
        sr = 16000
        audio_path = '{}/{}_{:03d}_{}.wav'.format(save_path, prefix, epoch, iter_idx)
        sf.write(audio_path, audio, sr)

    # save video
    try:
        video_path = '{}/temp_{}_{:03d}_{}.mp4'.format(save_path, prefix, epoch, iter_idx)
        ani.save(video_path, fps=15, dpi=80)  # dpi 150 for a higher resolution
        del ani
        plt.close(fig)
    except RuntimeError:
        assert False, 'RuntimeError'

    # merge audio and video
    if audio is not None:
        merged_video_path = '{}/{}_{:03d}_{}.mp4'.format(save_path, prefix, epoch, iter_idx)
        cmd = ['ffmpeg', '-loglevel', 'panic', '-y', '-i', video_path, '-i', audio_path, '-strict', '-2',
               merged_video_path]
        if clipping_to_shortest_stream:
            # insert before the output path so ffmpeg stops at the shorter stream
            cmd.insert(len(cmd) - 1, '-shortest')
        subprocess.call(cmd)
        if delete_audio_file:
            os.remove(audio_path)
        os.remove(video_path)  # the un-muxed temp video is always removed

    print('done, took {:.1f} seconds'.format(time.time() - start))
    return output_poses, target_poses
def save_checkpoint(state, filename):
    """Serialize a training-state dict to `filename` via torch.save."""
    torch.save(state, filename)
    logging.info('Saved the checkpoint')
def get_speaker_model(net):
    """Return the generator's speaker Vocab, unwrapping DataParallel; None if absent."""
    target = net.module if hasattr(net, 'module') else net
    try:
        speaker_model = target.z_obj
    except AttributeError:
        speaker_model = None
    # only an actual Vocab counts as a speaker model
    if not isinstance(speaker_model, vocab.Vocab):
        speaker_model = None
    return speaker_model
def load_checkpoint_and_model(checkpoint_path, _device='cpu'):
    """Restore a trained generator (in eval mode) plus its config from a checkpoint.

    :returns: (args, generator, loss_fn, lang_model, speaker_model, pose_dim)
    """
    print('loading checkpoint {}'.format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path, map_location=_device)

    args = checkpoint['args']
    epoch = checkpoint['epoch']
    lang_model = checkpoint['lang_model']
    speaker_model = checkpoint['speaker_model']
    pose_dim = checkpoint['pose_dim']
    print('epoch {}'.format(epoch))

    # rebuild the architecture, then load the trained weights into it
    generator, discriminator, loss_fn = train.init_model(args, lang_model, speaker_model, pose_dim, _device)
    generator.load_state_dict(checkpoint['gen_dict'])

    generator.train(False)  # inference only
    return args, generator, loss_fn, lang_model, speaker_model, pose_dim
def set_random_seed(seed):
    """Seed every RNG in play (python, numpy, torch CPU/CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
| 6,259 | 31.604167 | 112 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/model/seq2seq_net.py
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import math
'''
Based on the following Se2Seq implementations:
- https://github.com/AuCson/PyTorch-Batch-Attention-Seq2seq
- https://github.com/spro/practical-pytorch/blob/master/seq2seq-translation/seq2seq-translation-batched.ipynb
'''
class EncoderRNN(nn.Module):
    """Bidirectional GRU text encoder; the two directions are summed on output."""

    def __init__(self, input_size, embed_size, hidden_size, n_layers=1, dropout=0.5, pre_trained_embedding=None):
        super(EncoderRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.n_layers = n_layers
        self.dropout = dropout

        if pre_trained_embedding is None:
            self.embedding = nn.Embedding(input_size, embed_size)
        else:
            # warm-start from word2vec/glove weights and keep them trainable
            assert pre_trained_embedding.shape[0] == input_size
            assert pre_trained_embedding.shape[1] == embed_size
            self.embedding = nn.Embedding.from_pretrained(
                torch.FloatTensor(pre_trained_embedding), freeze=False)

        self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)

        # multi-GPU runs need contiguous RNN weights
        self.do_flatten_parameters = torch.cuda.device_count() > 1

    def forward(self, input_seqs, input_lengths, hidden=None):
        """Encode padded (T, B) index sequences, sorted by decreasing length.

        :returns: outputs (T, B, H) with both directions summed, and the GRU's
            last hidden state.
        """
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        packed = torch.nn.utils.rnn.pack_padded_sequence(self.embedding(input_seqs), input_lengths)
        packed_out, hidden = self.gru(packed, hidden)
        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out)  # back to padded (T, B, 2H)
        # collapse the bidirectional axis by summation
        outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
        return outputs, hidden
class Attn(nn.Module):
    """Additive (Bahdanau-style) attention over encoder outputs."""

    def __init__(self, hidden_size):
        super(Attn, self).__init__()
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        # re-initialize v ~ N(0, 1/sqrt(H))
        self.v.data.normal_(mean=0, std=1. / math.sqrt(self.v.size(0)))

    def forward(self, hidden, encoder_outputs):
        """hidden: (B, H) decoder state; encoder_outputs: (T, B, H).

        :returns: normalized attention weights of shape (B, 1, T).
        """
        seq_len = encoder_outputs.size(0)
        query = hidden.repeat(seq_len, 1, 1).transpose(0, 1)  # (B, T, H)
        keys = encoder_outputs.transpose(0, 1)                # (B, T, H)
        energies = self.score(query, keys)                    # (B, T)
        return F.softmax(energies, dim=1).unsqueeze(1)

    def score(self, hidden, encoder_outputs):
        # v^T tanh(W [query; keys]) for every time step, batched
        energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2)))  # (B, T, H)
        energy = energy.transpose(2, 1)                                          # (B, H, T)
        v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1)         # (B, 1, H)
        return torch.bmm(v, energy).squeeze(1)                                   # (B, T)
class BahdanauAttnDecoderRNN(nn.Module):
    """GRU decoder with Bahdanau (additive) attention over encoder outputs.

    Decodes one time step per forward() call. Optionally embeds discrete
    motion tokens (discrete_representation) and/or conditions each step on an
    8-d learned speaker embedding (speaker_model).
    """
    def __init__(self, input_size, hidden_size, output_size, n_layers=1, dropout_p=0.1,
                 discrete_representation=False, speaker_model=None):
        super(BahdanauAttnDecoderRNN, self).__init__()

        # define parameters
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        self.discrete_representation = discrete_representation
        self.speaker_model = speaker_model

        # define embedding layer (motion tokens -> dense vectors)
        if self.discrete_representation:
            self.embedding = nn.Embedding(output_size, hidden_size)
            self.dropout = nn.Dropout(dropout_p)

        if self.speaker_model:
            # one 8-d embedding per speaker id
            self.speaker_embedding = nn.Embedding(speaker_model.n_words, 8)

        # calc input size of the pre-GRU projection
        if self.discrete_representation:
            input_size = hidden_size  # embedding size
        linear_input_size = input_size + hidden_size
        if self.speaker_model:
            linear_input_size += 8

        # define layers
        self.pre_linear = nn.Sequential(
            nn.Linear(linear_input_size, hidden_size),
            nn.BatchNorm1d(hidden_size),
            nn.ReLU(inplace=True)
        )
        self.attn = Attn(hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout_p)

        # self.out = nn.Linear(hidden_size * 2, output_size)
        self.out = nn.Linear(hidden_size, output_size)

        # multi-GPU runs need contiguous RNN weights
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def freeze_attn(self):
        """Disable gradient updates for the attention sub-module."""
        for param in self.attn.parameters():
            param.requires_grad = False

    def forward(self, motion_input, last_hidden, encoder_outputs, vid_indices=None):
        '''
        :param motion_input:
            motion input for current time step, in shape [batch x dim]
        :param last_hidden:
            last hidden state of the decoder, in shape [layers x batch x hidden_size]
        :param encoder_outputs:
            encoder outputs in shape [steps x batch x hidden_size]
        :param vid_indices:
            speaker ids; required when speaker_model is set
        :return
            decoder output
        Note: we run this one step at a time i.e. you should use a outer loop
            to process the whole sequence
        '''
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        if self.discrete_representation:
            word_embedded = self.embedding(motion_input).view(1, motion_input.size(0), -1)  # [1 x B x embedding_dim]
            motion_input = self.dropout(word_embedded)
        else:
            motion_input = motion_input.view(1, motion_input.size(0), -1)  # [1 x batch x dim]

        # attention over the encoder states, queried with the top GRU layer's state
        attn_weights = self.attn(last_hidden[-1], encoder_outputs)  # [batch x 1 x T]
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1))  # [batch x 1 x attn_size]
        context = context.transpose(0, 1)  # [1 x batch x attn_size]

        # make input vec
        rnn_input = torch.cat((motion_input, context), 2)  # [1 x batch x (dim + attn_size)]

        if self.speaker_model:
            assert vid_indices is not None
            speaker_context = self.speaker_embedding(vid_indices).unsqueeze(0)
            rnn_input = torch.cat((rnn_input, speaker_context), 2)  # [1 x batch x (dim + attn_size + embed_size)]

        # pre_linear expects [batch x features] (BatchNorm1d runs over the batch dim)
        rnn_input = self.pre_linear(rnn_input.squeeze(0))
        rnn_input = rnn_input.unsqueeze(0)

        # rnn
        output, hidden = self.gru(rnn_input, last_hidden)

        # post-fc
        output = output.squeeze(0)  # [1 x batch x hidden_size] -> [batch x hidden_size]
        output = self.out(output)

        return output, hidden, attn_weights
class Generator(nn.Module):
    """Wraps the attention decoder; optionally conditions each step on a noise vector z."""

    def __init__(self, args, motion_dim, discrete_representation=False, speaker_model=None):
        super(Generator, self).__init__()
        self.output_size = motion_dim
        self.n_layers = args.n_layers
        self.discrete_representation = discrete_representation
        self.decoder = BahdanauAttnDecoderRNN(
            input_size=motion_dim + args.GAN_noise_size,
            hidden_size=args.hidden_size,
            output_size=self.output_size,
            n_layers=self.n_layers,
            dropout_p=args.dropout_prob,
            discrete_representation=discrete_representation,
            speaker_model=speaker_model)

    def freeze_attn(self):
        """Stop gradient updates for the decoder's attention sub-module."""
        self.decoder.freeze_attn()

    def forward(self, z, motion_input, last_hidden, encoder_output, vid_indices=None):
        """Run one decoding step; concatenates the noise vector z (if any) onto the input."""
        if z is None:
            step_input = motion_input
        else:
            # noise conditioning is undefined for discrete motion tokens
            assert not self.discrete_representation
            step_input = torch.cat([motion_input, z], dim=1)  # [bs x (10+z_size)]
        return self.decoder(step_input, last_hidden, encoder_output, vid_indices)
class Seq2SeqNet(nn.Module):
    """Text-to-motion seq2seq: EncoderRNN over word ids + attentive Generator decoder.

    The first n_pre_poses decoder steps are teacher-forced with ground-truth
    poses; the remaining steps feed back the decoder's own predictions.
    """
    def __init__(self, args, pose_dim, n_frames, n_words, word_embed_size, word_embeddings, speaker_model=None):
        super().__init__()
        self.encoder = EncoderRNN(
            n_words, word_embed_size, args.hidden_size, args.n_layers,
            dropout=args.dropout_prob, pre_trained_embedding=word_embeddings)
        self.decoder = Generator(args, pose_dim, speaker_model=speaker_model)

        # variable for storing outputs
        self.n_frames = n_frames
        self.n_pre_poses = args.n_pre_poses

    def forward(self, in_text, in_lengths, poses, vid_indices):
        """in_text: (batch, T) word ids; poses: (batch, n_frames, pose_dim).

        :returns: predicted poses with the same shape as `poses`.
        """
        # reshape to (seq x batch x dim)
        in_text = in_text.transpose(0, 1)
        poses = poses.transpose(0, 1)

        outputs = torch.zeros(self.n_frames, poses.size(1), self.decoder.output_size).to(poses.device)

        # run words through encoder
        encoder_outputs, encoder_hidden = self.encoder(in_text, in_lengths, None)
        decoder_hidden = encoder_hidden[:self.decoder.n_layers]  # use last hidden state from encoder

        # run through decoder one time step at a time
        decoder_input = poses[0]  # initial pose from the dataset
        outputs[0] = decoder_input

        for t in range(1, self.n_frames):
            decoder_output, decoder_hidden, _ = self.decoder(None, decoder_input, decoder_hidden, encoder_outputs,
                                                             vid_indices)
            outputs[t] = decoder_output

            if t < self.n_pre_poses:
                decoder_input = poses[t]  # next input is current target (teacher forcing)
            else:
                decoder_input = decoder_output  # next input is current prediction

        return outputs.transpose(0, 1)
| 10,719 | 41.039216 | 117 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/model/embedding_space_evaluator.py
|
import time
import numpy as np
import torch
import torch.nn.functional as F
import umap
from scipy import linalg
from model.embedding_net import EmbeddingNet
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning) # ignore warnings
class EmbeddingSpaceEvaluator:
    """Evaluates gestures in the latent space of a pre-trained EmbeddingNet.

    Accumulates latent features of real and generated pose sequences, then
    reports the Frechet distance between the two feature distributions and
    the mean per-sample latent distance.
    """

    def __init__(self, args, embed_net_path, lang_model, device):
        self.n_pre_poses = args.n_pre_poses

        # init embed net from a checkpoint; used purely as a frozen feature extractor
        ckpt = torch.load(embed_net_path, map_location=device)
        n_frames = args.n_poses
        word_embeddings = lang_model.word_embedding_weights
        mode = 'pose'
        self.pose_dim = ckpt['pose_dim']
        self.net = EmbeddingNet(args, self.pose_dim, n_frames, lang_model.n_words, args.wordembed_dim,
                                word_embeddings, mode).to(device)
        self.net.load_state_dict(ckpt['gen_dict'])
        self.net.train(False)

        # storage for accumulated per-batch features
        self.context_feat_list = []
        self.real_feat_list = []
        self.generated_feat_list = []
        self.recon_err_diff = []

    def reset(self):
        """Drop all accumulated samples."""
        self.context_feat_list = []
        self.real_feat_list = []
        self.generated_feat_list = []
        self.recon_err_diff = []

    def get_no_of_samples(self):
        """Number of pushed sample batches."""
        return len(self.real_feat_list)

    def push_samples(self, context_text, context_spec, generated_poses, real_poses):
        """Encode one batch of real and generated poses and store their latent features."""
        # convert poses to latent features
        pre_poses = real_poses[:, 0:self.n_pre_poses]
        context_feat, _, _, real_feat, _, _, real_recon = self.net(context_text, context_spec, pre_poses, real_poses,
                                                                   'pose', variational_encoding=False)
        _, _, _, generated_feat, _, _, generated_recon = self.net(None, None, pre_poses, generated_poses,
                                                                  'pose', variational_encoding=False)

        # bug fix: `if context_feat:` truth-tests the tensor, which raises
        # RuntimeError for multi-element tensors; compare against None instead
        if context_feat is not None:
            self.context_feat_list.append(context_feat.data.cpu().numpy())
        self.real_feat_list.append(real_feat.data.cpu().numpy())
        self.generated_feat_list.append(generated_feat.data.cpu().numpy())

        # reconstruction error gap (generated minus real); diagnostic only
        recon_err_real = F.l1_loss(real_poses, real_recon).item()
        recon_err_fake = F.l1_loss(generated_poses, generated_recon).item()
        self.recon_err_diff.append(recon_err_fake - recon_err_real)

    def get_features_for_viz(self):
        """Project real/generated features to 2-D with UMAP for visualization."""
        generated_feats = np.vstack(self.generated_feat_list)
        real_feats = np.vstack(self.real_feat_list)

        transformed_feats = umap.UMAP().fit_transform(np.vstack((generated_feats, real_feats)))
        n = int(transformed_feats.shape[0] / 2)
        generated_feats = transformed_feats[0:n, :]
        real_feats = transformed_feats[n:, :]

        return real_feats, generated_feats

    def get_scores(self):
        """Return (frechet_dist, feat_dist) over all pushed samples."""
        generated_feats = np.vstack(self.generated_feat_list)
        real_feats = np.vstack(self.real_feat_list)

        def frechet_distance(samples_A, samples_B):
            # fit a Gaussian to each sample set and compare the two
            A_mu = np.mean(samples_A, axis=0)
            A_sigma = np.cov(samples_A, rowvar=False)
            B_mu = np.mean(samples_B, axis=0)
            B_sigma = np.cov(samples_B, rowvar=False)
            try:
                frechet_dist = self.calculate_frechet_distance(A_mu, A_sigma, B_mu, B_sigma)
            except ValueError:
                frechet_dist = 1e+10  # degenerate covariance; report a huge distance
            return frechet_dist

        ####################################################################
        # frechet distance
        frechet_dist = frechet_distance(generated_feats, real_feats)

        ####################################################################
        # distance between real and generated samples on the latent feature space
        dists = []
        for i in range(real_feats.shape[0]):
            d = np.sum(np.absolute(real_feats[i] - generated_feats[i]))  # MAE
            dists.append(d)
        feat_dist = np.mean(dists)

        return frechet_dist, feat_dist

    @staticmethod
    def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
        """ from https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py """
        """Numpy implementation of the Frechet Distance.
        The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
        and X_2 ~ N(mu_2, C_2) is
                d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
        Stable version by Dougal J. Sutherland.
        Params:
        -- mu1   : Numpy array containing the activations of a layer of the
                   inception net (like returned by the function 'get_predictions')
                   for generated samples.
        -- mu2   : The sample mean over activations, precalculated on an
                   representative data set.
        -- sigma1: The covariance matrix over activations for generated samples.
        -- sigma2: The covariance matrix over activations, precalculated on an
                   representative data set.
        Returns:
        --   : The Frechet Distance.
        """

        mu1 = np.atleast_1d(mu1)
        mu2 = np.atleast_1d(mu2)

        sigma1 = np.atleast_2d(sigma1)
        sigma2 = np.atleast_2d(sigma2)

        assert mu1.shape == mu2.shape, \
            'Training and test mean vectors have different lengths'
        assert sigma1.shape == sigma2.shape, \
            'Training and test covariances have different dimensions'

        diff = mu1 - mu2

        # Product might be almost singular
        covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
        if not np.isfinite(covmean).all():
            msg = ('fid calculation produces singular product; '
                   'adding %s to diagonal of cov estimates') % eps
            print(msg)
            offset = np.eye(sigma1.shape[0]) * eps
            covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

        # Numerical error might give slight imaginary component
        if np.iscomplexobj(covmean):
            if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
                m = np.max(np.abs(covmean.imag))
                raise ValueError('Imaginary component {}'.format(m))
            covmean = covmean.real

        tr_covmean = np.trace(covmean)

        return (diff.dot(diff) + np.trace(sigma1) +
                np.trace(sigma2) - 2 * tr_covmean)
| 6,387 | 39.687898 | 117 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/model/embedding_net.py
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.multimodal_context_net import WavEncoder, TextEncoderTCN
def reparameterize(mu, logvar):
    """VAE reparameterization trick: a differentiable sample from N(mu, exp(logvar))."""
    sigma = torch.exp(0.5 * logvar)
    return mu + torch.randn_like(sigma) * sigma
def ConvNormRelu(in_channels, out_channels, downsample=False, padding=0, batchnorm=True):
    """Build a 1-D conv -> (optional BatchNorm) -> LeakyReLU(0.2) block.

    downsample=False uses kernel 3 / stride 1; True uses kernel 4 / stride 2.
    """
    if downsample:
        kernel_size, stride = 4, 2
    else:
        kernel_size, stride = 3, 1
    layers = [nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size,
                        stride=stride, padding=padding)]
    if batchnorm:
        # only allocate the norm layer when it is actually used
        # (the original built it unconditionally and discarded it)
        layers.append(nn.BatchNorm1d(out_channels))
    layers.append(nn.LeakyReLU(0.2, True))
    return nn.Sequential(*layers)
class PoseEncoderConv(nn.Module):
    """Convolutional pose-sequence encoder producing a 32-d latent (optionally variational)."""

    def __init__(self, length, dim):
        super().__init__()
        # temporal conv stack over the pose dimension
        self.net = nn.Sequential(
            ConvNormRelu(dim, 32, batchnorm=True),
            ConvNormRelu(32, 64, batchnorm=True),
            ConvNormRelu(64, 64, True, batchnorm=True),
            nn.Conv1d(64, 32, 3)
        )
        self.out_net = nn.Sequential(
            # nn.Linear(864, 256),  # for 64 frames
            nn.Linear(384, 256),  # for 34 frames
            nn.BatchNorm1d(256),
            nn.LeakyReLU(True),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.LeakyReLU(True),
            nn.Linear(128, 32),
        )
        # variational heads
        self.fc_mu = nn.Linear(32, 32)
        self.fc_logvar = nn.Linear(32, 32)

    def forward(self, poses, variational_encoding):
        """poses: (batch, seq, dim) -> (z, mu, logvar)."""
        conv_feat = self.net(poses.transpose(1, 2))  # conv over time: (batch, dim, seq)
        feat = self.out_net(conv_feat.flatten(1))
        mu = self.fc_mu(feat)
        logvar = self.fc_logvar(feat)
        # sample only when a variational code is requested; otherwise use the mean
        z = reparameterize(mu, logvar) if variational_encoding else mu
        return z, mu, logvar
class PoseDecoderFC(nn.Module):
    """MLP decoder: 32-d latent (plus optional 4 seed poses) -> (gen_length, pose_dim)."""

    def __init__(self, gen_length, pose_dim, use_pre_poses=False):
        super().__init__()
        self.gen_length = gen_length
        self.pose_dim = pose_dim
        self.use_pre_poses = use_pre_poses

        in_size = 32
        if use_pre_poses:
            # embed the 4 seed poses into a 32-d conditioning vector
            self.pre_pose_net = nn.Sequential(
                nn.Linear(pose_dim * 4, 32),
                nn.BatchNorm1d(32),
                nn.ReLU(),
                nn.Linear(32, 32),
            )
            in_size += 32

        self.net = nn.Sequential(
            nn.Linear(in_size, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, gen_length * pose_dim),
        )

    def forward(self, latent_code, pre_poses=None):
        """latent_code: (batch, 32) -> (batch, gen_length, pose_dim)."""
        if self.use_pre_poses:
            seed_feat = self.pre_pose_net(pre_poses.reshape(pre_poses.shape[0], -1))
            feat = torch.cat((seed_feat, latent_code), dim=1)
        else:
            feat = latent_code
        flat = self.net(feat)
        return flat.view(-1, self.gen_length, self.pose_dim)
class PoseDecoderGRU(nn.Module):
    """Bidirectional GRU decoder conditioned on a latent code and 4 seed poses."""

    def __init__(self, gen_length, pose_dim):
        super().__init__()
        self.gen_length = gen_length
        self.pose_dim = pose_dim
        self.in_size = 32 + 32
        self.hidden_size = 300

        # embed the 4 seed poses into a 32-d conditioning vector
        self.pre_pose_net = nn.Sequential(
            nn.Linear(pose_dim * 4, 32),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.Linear(32, 32),
        )
        self.gru = nn.GRU(self.in_size, hidden_size=self.hidden_size, num_layers=4, batch_first=True,
                          bidirectional=True, dropout=0.3)
        self.out = nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size // 2),
            nn.LeakyReLU(True),
            nn.Linear(self.hidden_size // 2, pose_dim)
        )

    def forward(self, latent_code, pre_poses):
        """latent_code: (batch, 32); pre_poses: (batch, 4, pose_dim)."""
        seed_feat = self.pre_pose_net(pre_poses.reshape(pre_poses.shape[0], -1))
        cond = torch.cat((seed_feat, latent_code), dim=1)
        # feed the same conditioning vector at every time step
        cond = cond.unsqueeze(1).repeat(1, self.gen_length, 1)

        rnn_out, _ = self.gru(cond)
        # merge the two directions by summation
        rnn_out = rnn_out[:, :, :self.hidden_size] + rnn_out[:, :, self.hidden_size:]
        flat = self.out(rnn_out.reshape(-1, rnn_out.shape[2]))
        return flat.view(pre_poses.shape[0], self.gen_length, -1)
class PoseDecoderConv(nn.Module):
    """Transposed-conv decoder from a 32-d latent code to a pose sequence.

    Supports output lengths of 64 and 34 frames.  The latent feature is
    expanded by an MLP to 4 * length values, reshaped to 4 channels, then
    refined by a conv stack that preserves the sequence length
    (+2, +2, -2, -2).  Output shape is (batch, length, dim).
    """

    def __init__(self, length, dim, use_pre_poses=False):
        super().__init__()
        self.use_pre_poses = use_pre_poses

        feat_size = 32
        if use_pre_poses:
            # Embed the flattened seed poses (4 frames) into a 32-d vector.
            self.pre_pose_net = nn.Sequential(
                nn.Linear(dim * 4, 32),
                nn.BatchNorm1d(32),
                nn.ReLU(),
                nn.Linear(32, 32),
            )
            feat_size += 32

        if length == 64:
            self.pre_net = nn.Sequential(
                nn.Linear(feat_size, 128),
                nn.BatchNorm1d(128),
                # BUG FIX: was nn.LeakyReLU(True) -- the first positional
                # argument is negative_slope, so True (== 1.0) made the
                # activation an identity.
                nn.LeakyReLU(inplace=True),
                nn.Linear(128, 256),  # 256 == 4 * 64
            )
        elif length == 34:
            self.pre_net = nn.Sequential(
                nn.Linear(feat_size, 64),
                nn.BatchNorm1d(64),
                nn.LeakyReLU(inplace=True),
                nn.Linear(64, 136),  # 136 == 4 * 34
            )
        else:
            # `assert False` would be stripped under `python -O`; raise instead.
            raise ValueError('unsupported output length: %d (expected 64 or 34)' % length)

        self.net = nn.Sequential(
            nn.ConvTranspose1d(4, 32, 3),
            nn.BatchNorm1d(32),
            nn.LeakyReLU(0.2, True),
            nn.ConvTranspose1d(32, 32, 3),
            nn.BatchNorm1d(32),
            nn.LeakyReLU(0.2, True),
            nn.Conv1d(32, 32, 3),
            nn.Conv1d(32, dim, 3),
        )

    def forward(self, feat, pre_poses=None):
        """Decode latent features into poses of shape (batch, length, dim)."""
        if self.use_pre_poses:
            pre_pose_feat = self.pre_pose_net(pre_poses.reshape(pre_poses.shape[0], -1))
            feat = torch.cat((pre_pose_feat, feat), dim=1)

        out = self.pre_net(feat)
        out = out.view(feat.shape[0], 4, -1)  # to (batch, 4 channels, seq)
        out = self.net(out)
        out = out.transpose(1, 2)  # to (batch, seq, dim)
        return out
class ContextEncoder(nn.Module):
    """Encodes speech context (text + audio) into a 32-d latent code.

    Per-frame text and audio features are concatenated, summarized by a
    GRU, and mapped to the mean/log-variance of a diagonal Gaussian; the
    returned latent is a reparameterized sample from it.
    """

    def __init__(self, args, n_frames, n_words, word_embed_size, word_embeddings):
        super().__init__()
        # Per-modality encoders.
        self.text_encoder = TextEncoderTCN(args, n_words, word_embed_size, pre_trained_embedding=word_embeddings)
        self.audio_encoder = WavEncoder()
        self.gru = nn.GRU(32 + 32, hidden_size=256, num_layers=2,
                          bidirectional=False, batch_first=True)
        self.out = nn.Sequential(
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 32)
        )
        self.fc_mu = nn.Linear(32, 32)
        self.fc_logvar = nn.Linear(32, 32)

        # flatten_parameters() is only needed for multi-GPU DataParallel runs.
        self.do_flatten_parameters = torch.cuda.device_count() > 1

    def forward(self, in_text, in_spec):
        """Return (z, mu, logvar) for the given text/audio context."""
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        text_feat_seq, _ = self.text_encoder(in_text)
        audio_feat_seq = self.audio_encoder(in_spec)
        combined = torch.cat((audio_feat_seq, text_feat_seq), dim=2)
        gru_out, _ = self.gru(combined)
        summary = self.out(gru_out[:, -1])  # last time step summarizes the sequence
        mu = self.fc_mu(summary)
        logvar = self.fc_logvar(summary)
        return reparameterize(mu, logvar), mu, logvar
class EmbeddingNet(nn.Module):
    """Joint embedding network mapping speech context and pose sequences
    into a shared latent space.

    In 'pose' mode only the pose autoencoder (conv encoder + conv decoder)
    is built; in any other mode a speech-context encoder and a GRU pose
    decoder are added so poses can be decoded from either modality.
    """

    def __init__(self, args, pose_dim, n_frames, n_words, word_embed_size, word_embeddings, mode):
        super().__init__()
        if mode != 'pose':
            self.context_encoder = ContextEncoder(args, n_frames, n_words, word_embed_size, word_embeddings)
            self.pose_encoder = PoseEncoderConv(n_frames, pose_dim)
            # self.decoder = PoseDecoderFC(n_frames, pose_dim, use_pre_poses=True)
            self.decoder = PoseDecoderGRU(n_frames, pose_dim)
        else:
            self.context_encoder = None
            self.pose_encoder = PoseEncoderConv(n_frames, pose_dim)
            self.decoder = PoseDecoderConv(n_frames, pose_dim)
        self.mode = mode

    def forward(self, in_text, in_audio, pre_poses, poses, input_mode=None, variational_encoding=False):
        """Encode the available modalities and decode poses from one of them.

        `input_mode` selects which latent code drives the decoder:
        'speech', 'pose', or 'random' (picks one of the two at random);
        None falls back to self.mode.

        Returns:
            (context_feat, context_mu, context_logvar,
             poses_feat, pose_mu, pose_logvar, out_poses);
            context/pose entries are None when that modality was not given.
        """
        if input_mode is None:
            assert self.mode is not None
            input_mode = self.mode

        # context
        if self.context_encoder is not None and in_text is not None and in_audio is not None:
            context_feat, context_mu, context_logvar = self.context_encoder(in_text, in_audio)
            # context_feat = F.normalize(context_feat, p=2, dim=1)
        else:
            context_feat = context_mu = context_logvar = None

        # poses
        if poses is not None:
            poses_feat, pose_mu, pose_logvar = self.pose_encoder(poses, variational_encoding)
            # poses_feat = F.normalize(poses_feat, p=2, dim=1)
        else:
            poses_feat = pose_mu = pose_logvar = None

        # decoder: pick the latent code to decode from
        if input_mode == 'random':
            input_mode = 'speech' if random.random() > 0.5 else 'pose'

        if input_mode == 'speech':
            latent_feat = context_feat
        elif input_mode == 'pose':
            latent_feat = poses_feat
        else:
            assert False

        out_poses = self.decoder(latent_feat, pre_poses)

        return context_feat, context_mu, context_logvar, poses_feat, pose_mu, pose_logvar, out_poses

    def freeze_pose_nets(self):
        """Disable gradients for the pose encoder/decoder (e.g. when training
        only the context encoder against a fixed pose latent space)."""
        for param in self.pose_encoder.parameters():
            param.requires_grad = False
        for param in self.decoder.parameters():
            param.requires_grad = False
if __name__ == '__main__':
    # for model debugging: round-trip random poses through the autoencoder
    seq_len, dim = 64, 10
    enc = PoseEncoderConv(seq_len, dim)
    dec = PoseDecoderConv(seq_len, dim)
    sample = torch.randn((4, seq_len, dim))
    latent, _, _ = enc(sample, True)
    reconstructed = dec(latent)
    print('input', sample.shape)
    print('feat', latent.shape)
    print('output', reconstructed.shape)
| 10,527 | 30.806647 | 113 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/model/vocab.py
|
import logging
import os
import pickle
import numpy as np
import fasttext
class Vocab:
    """Word vocabulary with word<->index maps, frequency counts, and
    optional pre-trained word-embedding weights (fasttext model or a
    GloVe-style text file)."""

    # special token indices
    PAD_token = 0
    SOS_token = 1
    EOS_token = 2
    UNK_token = 3

    def __init__(self, name, insert_default_tokens=True):
        """Create an empty vocabulary named `name`; when
        `insert_default_tokens` is set, PAD/SOS/EOS/UNK are pre-registered."""
        self.name = name
        self.trimmed = False  # set once trim() has been applied
        self.word_embedding_weights = None  # filled by load_word_vectors()
        self.reset_dictionary(insert_default_tokens)

    def reset_dictionary(self, insert_default_tokens=True):
        """Clear all word/index/count maps and re-insert the special tokens."""
        self.word2index = {}
        self.word2count = {}
        if insert_default_tokens:
            self.index2word = {self.PAD_token: "<PAD>", self.SOS_token: "<SOS>",
                               self.EOS_token: "<EOS>", self.UNK_token: "<UNK>"}
        else:
            self.index2word = {self.UNK_token: "<UNK>"}
        self.n_words = len(self.index2word)  # count default tokens

    def index_word(self, word):
        """Add `word` to the vocabulary, or bump its count if already present."""
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1

    def add_vocab(self, other_vocab):
        """Merge every word from another Vocab into this one."""
        for word, _ in other_vocab.word2count.items():
            self.index_word(word)

    # remove words below a certain count threshold
    def trim(self, min_count):
        """Drop words seen fewer than `min_count` times and re-index.

        No-op when called a second time.  NOTE(review): counts of the
        kept words are reset to 1 because the dictionary is rebuilt via
        index_word().
        """
        if self.trimmed:
            return
        self.trimmed = True

        keep_words = []
        for k, v in self.word2count.items():
            if v >= min_count:
                keep_words.append(k)

        logging.info(' word trimming, kept %s / %s = %.4f' % (
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))

        # reinitialize dictionary
        self.reset_dictionary()
        for word in keep_words:
            self.index_word(word)

    def get_word_index(self, word):
        """Return the index of `word`, or UNK_token for unknown words."""
        if word in self.word2index:
            return self.word2index[word]
        else:
            return self.UNK_token

    def load_word_vectors(self, pretrained_path, embedding_dim=300):
        """Fill self.word_embedding_weights (n_words x embedding_dim, float32)
        with fasttext vectors for every word in the vocabulary; rows for the
        special tokens keep their random initialization."""
        logging.info(" loading word vectors from '{}'...".format(pretrained_path))

        # initialize embeddings to random values for special words
        init_sd = 1 / np.sqrt(embedding_dim)
        weights = np.random.normal(0, scale=init_sd, size=[self.n_words, embedding_dim])
        weights = weights.astype(np.float32)

        # read word vectors
        word_model = fasttext.load_model(pretrained_path)
        for word, id in self.word2index.items():
            vec = word_model.get_word_vector(word)
            weights[id] = vec

        self.word_embedding_weights = weights

    def __get_embedding_weight(self, pretrained_path, embedding_dim=300):
        """ function modified from http://ronny.rest/blog/post_2017_08_04_glove/ """
        # Loads GloVe-style text embeddings; the parsed weight matrix is
        # cached as a pickle next to the source file so later runs skip
        # the slow text parse.
        logging.info("Loading word embedding '{}'...".format(pretrained_path))
        cache_path = os.path.splitext(pretrained_path)[0] + '_cache.pkl'
        weights = None

        # use cached file if it exists
        if os.path.exists(cache_path):  # load cached weights
            with open(cache_path, 'rb') as f:
                logging.info(' using cached result from {}'.format(cache_path))
                weights = pickle.load(f)

                # vocabulary changed since the cache was written -> re-parse
                if weights.shape != (self.n_words, embedding_dim):
                    logging.warning(' failed to load word embedding weights. reinitializing...')
                    weights = None

        if weights is None:
            # initialize embeddings to random values for special and OOV words
            init_sd = 1 / np.sqrt(embedding_dim)
            weights = np.random.normal(0, scale=init_sd, size=[self.n_words, embedding_dim])
            weights = weights.astype(np.float32)

            with open(pretrained_path, encoding="utf-8", mode="r") as textFile:
                num_embedded_words = 0
                for line_raw in textFile:
                    # extract the word, and embeddings vector
                    line = line_raw.split()
                    try:
                        word, vector = (line[0], np.array(line[1:], dtype=np.float32))
                        # if word == 'love':  # debugging
                        #     print(word, vector)

                        # if it is in our vocab, then update the corresponding weights
                        id = self.word2index.get(word, None)
                        if id is not None:
                            weights[id] = vector
                            num_embedded_words += 1
                    except ValueError:
                        # malformed line (e.g. header or multi-token word)
                        logging.info(' parsing error at {}...'.format(line_raw[:50]))
                        continue
                logging.info(' {} / {} word vectors are found in the embedding'.format(num_embedded_words, len(self.word2index)))

                with open(cache_path, 'wb') as f:
                    pickle.dump(weights, f)
        return weights
| 4,939 | 36.709924 | 130 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/model/multimodal_context_net.py
|
import torch
import torch.nn as nn
from model import vocab
import model.embedding_net
from model.tcn import TemporalConvNet
class WavEncoder(nn.Module):
    """Strided 1-D conv stack turning raw waveform samples into a sequence
    of 32-d audio features, shaped (batch, seq, 32)."""

    def __init__(self):
        super().__init__()
        # Three conv-BN-LeakyReLU stages followed by a projection conv.
        channels = (1, 16, 32, 64)
        strides = (5, 6, 6)
        paddings = (1600, 0, 0)
        stages = []
        for c_in, c_out, stride, pad in zip(channels[:-1], channels[1:], strides, paddings):
            stages += [nn.Conv1d(c_in, c_out, 15, stride=stride, padding=pad),
                       nn.BatchNorm1d(c_out),
                       nn.LeakyReLU(0.3, inplace=True)]
        stages.append(nn.Conv1d(64, 32, 15, stride=6))
        self.feat_extractor = nn.Sequential(*stages)

    def forward(self, wav_data):
        """Encode (batch, samples) waveforms into (batch, seq, 32) features."""
        features = self.feat_extractor(wav_data.unsqueeze(1))  # add channel dim
        return features.transpose(1, 2)  # to (batch x seq x dim)
class TextEncoderTCN(nn.Module):
    """ based on https://github.com/locuslab/TCN/blob/master/TCN/word_cnn/model.py """

    def __init__(self, args, n_words, embed_size=300, pre_trained_embedding=None,
                 kernel_size=2, dropout=0.3, emb_dropout=0.1):
        super(TextEncoderTCN, self).__init__()

        if pre_trained_embedding is None:
            self.embedding = nn.Embedding(n_words, embed_size)
        else:
            # use pre-trained embedding (fasttext)
            assert pre_trained_embedding.shape[0] == n_words
            assert pre_trained_embedding.shape[1] == embed_size
            self.embedding = nn.Embedding.from_pretrained(
                torch.FloatTensor(pre_trained_embedding), freeze=args.freeze_wordembed)

        num_channels = [args.hidden_size] * args.n_layers
        self.tcn = TemporalConvNet(embed_size, num_channels, kernel_size, dropout=dropout)
        self.decoder = nn.Linear(num_channels[-1], 32)
        self.drop = nn.Dropout(emb_dropout)
        self.emb_dropout = emb_dropout
        self.init_weights()

    def init_weights(self):
        """Zero the decoder bias and use a small normal init for its weights."""
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.normal_(0, 0.01)

    def forward(self, input):
        """Map token ids (batch, seq) to 32-d features (batch, seq, 32).

        Returns (features, 0); the second element is a placeholder kept
        for interface compatibility with callers that unpack two values.
        """
        embedded = self.drop(self.embedding(input))
        # The TCN works on (batch, channels, seq); transpose in and out.
        tcn_out = self.tcn(embedded.transpose(1, 2)).transpose(1, 2)
        return self.decoder(tcn_out).contiguous(), 0
class PoseGenerator(nn.Module):
    """Main gesture generator: encodes text and/or audio context together
    with seed poses (and an optional speaker/style vector z) and decodes a
    pose sequence with a bidirectional GRU.
    """

    def __init__(self, args, pose_dim, n_words, word_embed_size, word_embeddings, z_obj=None):
        super().__init__()
        self.pre_length = args.n_pre_poses
        self.gen_length = args.n_poses - args.n_pre_poses
        self.z_obj = z_obj
        self.input_context = args.input_context

        if self.input_context == 'both':
            self.in_size = 32 + 32 + pose_dim + 1  # audio_feat + text_feat + last pose + constraint bit
        elif self.input_context == 'none':
            self.in_size = pose_dim + 1
        else:
            self.in_size = 32 + pose_dim + 1  # audio or text only

        self.audio_encoder = WavEncoder()
        self.text_encoder = TextEncoderTCN(args, n_words, word_embed_size, pre_trained_embedding=word_embeddings,
                                           dropout=args.dropout_prob)

        self.speaker_embedding = None
        if self.z_obj:
            self.z_size = 16
            self.in_size += self.z_size
            if isinstance(self.z_obj, vocab.Vocab):
                # learned per-speaker style embedding with variational sampling
                self.speaker_embedding = nn.Sequential(
                    nn.Embedding(z_obj.n_words, self.z_size),
                    nn.Linear(self.z_size, self.z_size)
                )
                self.speaker_mu = nn.Linear(self.z_size, self.z_size)
                self.speaker_logvar = nn.Linear(self.z_size, self.z_size)
            else:
                pass  # random noise

        self.hidden_size = args.hidden_size
        self.gru = nn.GRU(self.in_size, hidden_size=self.hidden_size, num_layers=args.n_layers, batch_first=True,
                          bidirectional=True, dropout=args.dropout_prob)
        self.out = nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size//2),
            # BUG FIX: was nn.LeakyReLU(True) -- the first positional argument
            # is negative_slope, so True (== 1.0) made this layer the identity.
            nn.LeakyReLU(inplace=True),
            nn.Linear(self.hidden_size//2, pose_dim)
        )

        # flatten_parameters() is only needed for multi-GPU DataParallel runs.
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def forward(self, pre_seq, in_text, in_audio, vid_indices=None):
        """Generate a pose sequence.

        Args:
            pre_seq: (batch, n_poses, pose_dim + 1) seed poses with a
                trailing constraint bit per frame.
            in_text: (batch, seq) token ids.
            in_audio: (batch, samples) raw audio.
            vid_indices: speaker ids; required when a speaker embedding is used.

        Returns:
            (decoder_outputs, z_context, z_mu, z_logvar); the z entries are
            None when no z vector / no learned embedding is configured.
        """
        decoder_hidden = None
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        text_feat_seq = audio_feat_seq = None
        if self.input_context != 'none':
            # audio
            audio_feat_seq = self.audio_encoder(in_audio)  # output (bs, n_frames, feat_size)
            # text
            text_feat_seq, _ = self.text_encoder(in_text)
            assert(audio_feat_seq.shape[1] == text_feat_seq.shape[1])

        # z vector; speaker embedding or random noise
        if self.z_obj:
            if self.speaker_embedding:
                assert vid_indices is not None
                z_context = self.speaker_embedding(vid_indices)
                z_mu = self.speaker_mu(z_context)
                z_logvar = self.speaker_logvar(z_context)
                z_context = model.embedding_net.reparameterize(z_mu, z_logvar)
            else:
                z_mu = z_logvar = None
                z_context = torch.randn(in_text.shape[0], self.z_size, device=in_text.device)
        else:
            z_mu = z_logvar = None
            z_context = None

        # assemble the per-frame decoder input according to the context mode
        if self.input_context == 'both':
            in_data = torch.cat((pre_seq, audio_feat_seq, text_feat_seq), dim=2)
        elif self.input_context == 'audio':
            in_data = torch.cat((pre_seq, audio_feat_seq), dim=2)
        elif self.input_context == 'text':
            in_data = torch.cat((pre_seq, text_feat_seq), dim=2)
        elif self.input_context == 'none':
            in_data = pre_seq
        else:
            assert False

        if z_context is not None:
            # broadcast the style vector over every time step
            repeated_z = z_context.unsqueeze(1)
            repeated_z = repeated_z.repeat(1, in_data.shape[1], 1)
            in_data = torch.cat((in_data, repeated_z), dim=2)

        output, decoder_hidden = self.gru(in_data, decoder_hidden)
        output = output[:, :, :self.hidden_size] + output[:, :, self.hidden_size:]  # sum bidirectional outputs
        output = self.out(output.reshape(-1, output.shape[2]))
        decoder_outputs = output.reshape(in_data.shape[0], in_data.shape[1], -1)

        return decoder_outputs, z_context, z_mu, z_logvar
class Discriminator(nn.Module):
    """Sequence discriminator: scores pose sequences (optionally
    conditioned on text) with a bidirectional GRU; outputs a sigmoid
    probability per sequence, shape (batch, 1).
    """

    def __init__(self, args, input_size, n_words=None, word_embed_size=None, word_embeddings=None):
        super().__init__()
        self.input_size = input_size

        if n_words and word_embed_size:
            # BUG FIX: `args` was missing from this call, so `n_words` was
            # passed as `args`, `word_embed_size` as `n_words`, and
            # `word_embeddings` as `embed_size` -- every argument shifted.
            self.text_encoder = TextEncoderTCN(args, n_words, word_embed_size,
                                               pre_trained_embedding=word_embeddings)
            input_size += 32
        else:
            self.text_encoder = None

        self.hidden_size = args.hidden_size
        self.gru = nn.GRU(input_size, hidden_size=self.hidden_size, num_layers=args.n_layers, bidirectional=True,
                          dropout=args.dropout_prob, batch_first=True)
        self.out = nn.Linear(self.hidden_size, 1)
        self.out2 = nn.Linear(args.n_poses, 1)

        # flatten_parameters() is only needed for multi-GPU DataParallel runs.
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def forward(self, poses, in_text=None):
        """Return a (batch, 1) real/fake score in [0, 1] for each sequence."""
        decoder_hidden = None
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        if self.text_encoder:
            text_feat_seq, _ = self.text_encoder(in_text)
            poses = torch.cat((poses, text_feat_seq), dim=2)

        output, decoder_hidden = self.gru(poses, decoder_hidden)
        output = output[:, :, :self.hidden_size] + output[:, :, self.hidden_size:]  # sum bidirectional outputs

        # use the last N outputs
        batch_size = poses.shape[0]
        output = output.contiguous().view(-1, output.shape[2])
        output = self.out(output)  # apply linear to every output
        output = output.view(batch_size, -1)
        output = self.out2(output)
        output = torch.sigmoid(output)
        return output
class ConvDiscriminator(nn.Module):
    """Discriminator with a conv front-end followed by a bidirectional GRU.

    Expects pose sequences of 34 frames: the three k=3 convs shrink the
    length to 28, which is matched by the final Linear(28, 1).  Returns a
    sigmoid score of shape (batch, 1).
    """

    def __init__(self, input_size):
        super().__init__()
        self.input_size = input_size

        self.hidden_size = 64
        self.pre_conv = nn.Sequential(
            nn.Conv1d(input_size, 16, 3),
            nn.BatchNorm1d(16),
            # BUG FIX: was nn.LeakyReLU(True) -- the first positional argument
            # is negative_slope, so True (== 1.0) made the activation an identity.
            nn.LeakyReLU(inplace=True),
            nn.Conv1d(16, 8, 3),
            nn.BatchNorm1d(8),
            nn.LeakyReLU(inplace=True),
            nn.Conv1d(8, 8, 3),
        )

        self.gru = nn.GRU(8, hidden_size=self.hidden_size, num_layers=4, bidirectional=True,
                          dropout=0.3, batch_first=True)
        self.out = nn.Linear(self.hidden_size, 1)
        self.out2 = nn.Linear(28, 1)  # 28 == 34 - 3 * (3 - 1) after the convs

        # flatten_parameters() is only needed for multi-GPU DataParallel runs.
        self.do_flatten_parameters = False
        if torch.cuda.device_count() > 1:
            self.do_flatten_parameters = True

    def forward(self, poses, in_text=None):
        """Return a (batch, 1) real/fake score in [0, 1]; `in_text` is unused."""
        decoder_hidden = None
        if self.do_flatten_parameters:
            self.gru.flatten_parameters()

        poses = poses.transpose(1, 2)  # to (batch, dim, seq) for the convs
        feat = self.pre_conv(poses)
        feat = feat.transpose(1, 2)

        output, decoder_hidden = self.gru(feat, decoder_hidden)
        output = output[:, :, :self.hidden_size] + output[:, :, self.hidden_size:]  # sum bidirectional outputs

        # use the last N outputs
        batch_size = poses.shape[0]
        output = output.contiguous().view(-1, output.shape[2])
        output = self.out(output)  # apply linear to every output
        output = output.view(batch_size, -1)
        output = self.out2(output)
        output = torch.sigmoid(output)
        return output
| 9,831 | 37.86166 | 113 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/model/speech2gesture.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
""" reimplement speech2gesture model(https://github.com/amirbar/speech2gesture) with pytorch """
class Conv2d_tf(nn.Conv2d):
    """
    Conv2d with the padding behavior from TF
    from https://github.com/mlperf/inference/blob/482f6a3beb7af2fb0bd2d91d6185d5e71c22c55f/others/edge/object_detection/ssd_mobilenet/pytorch/utils.py
    """

    def __init__(self, *args, **kwargs):
        super(Conv2d_tf, self).__init__(*args, **kwargs)
        # "SAME" (default) or "VALID", mirroring TF's padding modes; this
        # string deliberately replaces nn.Conv2d's numeric `padding` attribute.
        self.padding = kwargs.get("padding", "SAME")

    def _compute_padding(self, input, dim):
        """Compute TF-style SAME padding for spatial dim `dim` (0=rows, 1=cols).

        Returns (additional_padding, total_padding): `additional_padding` is 1
        when the total is odd, so the extra cell must be padded asymmetrically
        on the bottom/right side (TF convention).
        """
        input_size = input.size(dim + 2)  # skip batch and channel dims
        filter_size = self.weight.size(dim + 2)
        effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
        out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]  # ceil division
        total_padding = max(
            0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size
        )
        additional_padding = int(total_padding % 2 != 0)

        return additional_padding, total_padding

    def forward(self, input):
        if self.padding == "VALID":
            # no padding at all, exactly like TF's VALID mode
            return F.conv2d(
                input,
                self.weight,
                self.bias,
                self.stride,
                padding=0,
                dilation=self.dilation,
                groups=self.groups,
            )
        rows_odd, padding_rows = self._compute_padding(input, dim=0)
        cols_odd, padding_cols = self._compute_padding(input, dim=1)
        if rows_odd or cols_odd:
            # pad the odd extra cell on the bottom/right first (TF convention)
            input = F.pad(input, [0, cols_odd, 0, rows_odd])

        return F.conv2d(
            input,
            self.weight,
            self.bias,
            self.stride,
            padding=(padding_rows // 2, padding_cols // 2),
            dilation=self.dilation,
            groups=self.groups,
        )
class Conv1d_tf(nn.Conv1d):
    """
    Conv1d with the padding behavior from TF
    modified from https://github.com/mlperf/inference/blob/482f6a3beb7af2fb0bd2d91d6185d5e71c22c55f/others/edge/object_detection/ssd_mobilenet/pytorch/utils.py
    """

    def __init__(self, *args, **kwargs):
        super(Conv1d_tf, self).__init__(*args, **kwargs)
        # "SAME" (default) or "VALID", mirroring TF's padding modes; this
        # string deliberately replaces nn.Conv1d's numeric `padding` attribute.
        self.padding = kwargs.get("padding", "SAME")

    def _compute_padding(self, input, dim):
        """Compute TF-style SAME padding for the temporal dim (dim=0).

        Returns (additional_padding, total_padding): `additional_padding` is 1
        when the total is odd, so one extra cell must be padded on the right
        (TF convention).
        """
        input_size = input.size(dim + 2)  # skip batch and channel dims
        filter_size = self.weight.size(dim + 2)
        effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
        out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]  # ceil division
        total_padding = max(
            0, (out_size - 1) * self.stride[dim] + effective_filter_size - input_size
        )
        additional_padding = int(total_padding % 2 != 0)

        return additional_padding, total_padding

    def forward(self, input):
        if self.padding == "VALID":
            # no padding at all, exactly like TF's VALID mode
            return F.conv1d(
                input,
                self.weight,
                self.bias,
                self.stride,
                padding=0,
                dilation=self.dilation,
                groups=self.groups,
            )
        rows_odd, padding_rows = self._compute_padding(input, dim=0)
        if rows_odd:
            # pad the odd extra cell on the right first (TF convention)
            input = F.pad(input, [0, rows_odd])

        return F.conv1d(
            input,
            self.weight,
            self.bias,
            self.stride,
            padding=(padding_rows // 2),
            dilation=self.dilation,
            groups=self.groups,
        )
def ConvNormRelu(in_channels, out_channels, type='1d', downsample=False, k=None, s=None, padding='SAME'):
    """Build a TF-padded Conv -> BatchNorm -> LeakyReLU(0.2) block.

    When both `k` and `s` are unset they default to (3, 1), or (4, 2) when
    `downsample` is requested.  `type` selects the 1-d or 2-d variant.
    """
    if k is None and s is None:
        k, s = (4, 2) if downsample else (3, 1)

    if type == '1d':
        conv_block = Conv1d_tf(in_channels, out_channels, kernel_size=k, stride=s, padding=padding)
        norm_block = nn.BatchNorm1d(out_channels)
    elif type == '2d':
        conv_block = Conv2d_tf(in_channels, out_channels, kernel_size=k, stride=s, padding=padding)
        norm_block = nn.BatchNorm2d(out_channels)
    else:
        assert False

    return nn.Sequential(conv_block, norm_block, nn.LeakyReLU(0.2, True))
class UnetUp(nn.Module):
    """U-Net upsampling step: nearest-neighbor (x2) upsample of the lower
    feature map, elementwise add of the skip connection, then conv-norm-relu."""

    def __init__(self, in_ch, out_ch):
        super(UnetUp, self).__init__()
        self.conv = ConvNormRelu(in_ch, out_ch)

    def forward(self, x1, x2):
        upsampled = torch.repeat_interleave(x1, 2, dim=2)
        upsampled = upsampled[:, :, :x2.shape[2]]  # to match dim
        # addition (not concat) differs from the original UNET, but matches
        # the speech2gesture implementation
        return self.conv(upsampled + x2)
class AudioEncoder(nn.Module):
    """Spectrogram encoder from speech2gesture: a 2-D conv front-end
    followed by a 1-D U-Net over time; outputs (batch, 256, n_frames)."""

    def __init__(self, n_frames):
        super().__init__()
        self.n_frames = n_frames
        # 2-D conv stack over the raw spectrogram.
        self.first_net = nn.Sequential(
            ConvNormRelu(1, 64, '2d', False),
            ConvNormRelu(64, 64, '2d', True),
            ConvNormRelu(64, 128, '2d', False),
            ConvNormRelu(128, 128, '2d', True),
            ConvNormRelu(128, 256, '2d', False),
            ConvNormRelu(256, 256, '2d', True),
            ConvNormRelu(256, 256, '2d', False),
            ConvNormRelu(256, 256, '2d', False, padding='VALID')
        )
        # Resample the 2-D feature map to one column per output frame.
        self.make_1d = torch.nn.Upsample((n_frames, 1), mode='bilinear', align_corners=False)
        # 1-D U-Net over the time axis.
        self.down1 = nn.Sequential(
            ConvNormRelu(256, 256, '1d', False),
            ConvNormRelu(256, 256, '1d', False)
        )
        self.down2 = ConvNormRelu(256, 256, '1d', True)
        self.down3 = ConvNormRelu(256, 256, '1d', True)
        self.down4 = ConvNormRelu(256, 256, '1d', True)
        self.down5 = ConvNormRelu(256, 256, '1d', True)
        self.down6 = ConvNormRelu(256, 256, '1d', True)
        self.up1 = UnetUp(256, 256)
        self.up2 = UnetUp(256, 256)
        self.up3 = UnetUp(256, 256)
        self.up4 = UnetUp(256, 256)
        self.up5 = UnetUp(256, 256)

    def forward(self, spectrogram):
        spectrogram = spectrogram.unsqueeze(1).float()  # add channel dim
        frames = self.make_1d(self.first_net(spectrogram)).squeeze(3)
        # encoder path
        d1 = self.down1(frames)
        d2 = self.down2(d1)
        d3 = self.down3(d2)
        d4 = self.down4(d3)
        d5 = self.down5(d4)
        d6 = self.down6(d5)
        # decoder path with skip connections (deepest first)
        x = self.up1(d6, d5)
        x = self.up2(x, d4)
        x = self.up3(x, d3)
        x = self.up4(x, d2)
        x = self.up5(x, d1)
        return x
class Generator(nn.Module):
    """speech2gesture generator: U-Net audio features plus seed-pose
    features decoded by 1-D convs into a (batch, n_poses, pose_dim)
    pose sequence."""

    def __init__(self, n_poses, pose_dim, n_pre_poses):
        super().__init__()
        self.gen_length = n_poses

        self.audio_encoder = AudioEncoder(n_poses)
        # Embed the flattened seed poses into a 16-d vector.
        self.pre_pose_encoder = nn.Sequential(
            nn.Linear(n_pre_poses * pose_dim, 32),
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.Linear(32, 16)
        )

        self.decoder = nn.Sequential(
            ConvNormRelu(256 + 16, 256),
            ConvNormRelu(256, 256),
            ConvNormRelu(256, 256),
            ConvNormRelu(256, 256)
        )
        self.final_out = nn.Conv1d(256, pose_dim, 1, 1)

    def forward(self, in_spec, pre_poses):
        audio_feat = self.audio_encoder(in_spec)  # (bs, feat_size, n_frames)
        seed_feat = self.pre_pose_encoder(pre_poses.reshape(pre_poses.shape[0], -1))  # (bs, 16)
        # broadcast the seed-pose feature over every frame
        seed_feat = seed_feat.unsqueeze(2).repeat(1, 1, self.gen_length)

        decoded = self.decoder(torch.cat((audio_feat, seed_feat), dim=1))
        return self.final_out(decoded).transpose(1, 2)  # to (batch, seq, dim)
class Discriminator(nn.Module):
    """speech2gesture discriminator: scores frame-to-frame pose motion
    (first differences) with a 1-D conv stack, patch-style output."""

    def __init__(self, pose_dim):
        super().__init__()
        self.net = nn.Sequential(
            Conv1d_tf(pose_dim, 64, kernel_size=4, stride=2, padding='SAME'),
            nn.LeakyReLU(0.2, True),
            ConvNormRelu(64, 128, '1d', True),
            ConvNormRelu(128, 256, '1d', k=4, s=1),
            Conv1d_tf(256, 1, kernel_size=4, stride=1, padding='SAME'),
        )

    def forward(self, x):
        motion = x[:, 1:] - x[:, :-1]  # pose differences
        # conv over (batch, dim, seq)
        return self.net(motion.transpose(1, 2))
if __name__ == '__main__':
    # for model debugging: run random inputs through generator/discriminator
    dim = 16
    gen = Generator(64, dim, 4)
    spectrogram = torch.randn((4, 128, 64))
    seed_poses = torch.randn((4, 4, dim))
    poses = gen(spectrogram, seed_poses)
    print('spectrogram', spectrogram.shape)
    print('output', poses.shape)

    disc = Discriminator(dim)
    scores = disc(poses)
    print('discrimination output', scores.shape)
| 8,841 | 32.116105 | 159 |
py
|
Gesture-Generation-from-Trimodal-Context
|
Gesture-Generation-from-Trimodal-Context-master/scripts/model/tcn.py
|
""" from https://github.com/locuslab/TCN/blob/master/TCN/tcn.py """
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
    """Remove the trailing `chomp_size` elements from the time axis.

    Used after a padded causal conv to drop the look-ahead padding so the
    output length matches the input length.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # BUG FIX: `x[:, :, :-0]` is an empty slice, so a chomp_size of 0
        # (e.g. kernel_size=1, hence padding 0) used to destroy the tensor.
        if self.chomp_size == 0:
            return x.contiguous()
        return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    """One residual TCN block: two dilated causal conv layers (weight-
    normalized, ReLU, dropout) plus a 1x1-conv shortcut when the channel
    counts differ."""

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)  # trim the causal look-ahead padding
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        # 1x1 conv to match channel counts on the residual path, if needed.
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Small normal init for all conv weights (TCN reference init)."""
        for conv in (self.conv1, self.conv2, self.downsample):
            if conv is not None:
                conv.weight.data.normal_(0, 0.01)

    def forward(self, x):
        """Apply both conv layers and add the (possibly projected) input."""
        residual = x if self.downsample is None else self.downsample(x)
        return self.relu(self.net(x) + residual)
class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation
    (1, 2, 4, ...), one block per entry of `num_channels`."""

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        blocks = []
        channel_sizes = [num_inputs] + list(num_channels)
        for level, (c_in, c_out) in enumerate(zip(channel_sizes[:-1], channel_sizes[1:])):
            dilation = 2 ** level
            # Padding keeps the output length equal to the input length
            # (after the block's internal chomp).
            blocks.append(TemporalBlock(c_in, c_out, kernel_size, stride=1, dilation=dilation,
                                        padding=(kernel_size - 1) * dilation, dropout=dropout))
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        """x: (batch, num_inputs, seq) -> (batch, num_channels[-1], seq)."""
        return self.network(x)
| 2,536 | 38.030769 | 110 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/unittest_mf_gp_bandit.py
|
"""
Unit tests for MF-GP-Bandits
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=maybe-no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
import numpy as np
# Local
from examples.synthetic_functions import get_mf_hartmann_as_mfof
from gen_mfgp_sample import gen_simple_mfgp_as_mfof
from mf_func import get_noisy_mfof_from_mfof
import mf_gp_bandit
from mf_gpb_utils import are_opt_fidel_queries
from unittest_mf_func import get_mf_func_data
from utils.ancillary_utils import is_non_decreasing_sequence
from utils.base_test_class import BaseTestClass, execute_tests
import utils.reporters as reporters
from utils import option_handler
# Generate data
def _get_gpb_instances():
  """ Creates GPB test problems and attaches an MFGPBandit instance to each. """
  problems = get_mf_func_data()
  for problem in problems:
    problem.mfgpb = mf_gp_bandit.MFGPBandit(problem.mfof)
  return problems
def _get_gpb_problem():
  """ Returns a single bandit problem configured with a silent reporter. """
  problem = get_mf_func_data()[0]
  problem.reporter = reporters.SilentReporter()
  return problem
class MFGPBanditTestCase(BaseTestClass):
  """ Unit tests for mf_gpb_utils.py """

  def setUp(self):
    """ Sets up unit tests. """
    pass

  def test_initial_sampling(self):
    """ Test for initialisation sampling.

    Checks that optimise_initialise() respects the initialisation budget
    and keeps its bookkeeping (history, spent capital, current optimum)
    consistent for each acquisition.
    """
    self.report('Testing sample initialisation.')
    prob = _get_gpb_problem()
    acquisitions = ['mf_gp_ucb', 'gp_ucb', 'gp_ei']
    options = option_handler.load_options(mf_gp_bandit.all_mf_gp_bandit_args,
                                          reporter=prob.reporter)
    for acq in acquisitions:
      options.acq = acq
      options.gpb_init_capital = prob.mfof.opt_fidel_cost * 23.2
      mfgpb = mf_gp_bandit.MFGPBandit(prob.mfof, options, prob.reporter)
      mfgpb.optimise_initialise()
      # Queries made at the highest (optimal) fidelity.
      hf_idxs = are_opt_fidel_queries(mfgpb.history.query_fidels, prob.mfof.opt_fidel)
      hf_vals = mfgpb.history.query_vals[hf_idxs]
      num_hf_queries = len(hf_vals)
      self.report(('Initialised %s with %d queries (%d at opt_fidel). Init capital = ' +
                   '%0.4f (%0.4f used) ')%(acq, len(mfgpb.history.query_vals),
                   num_hf_queries, options.gpb_init_capital, mfgpb.spent_capital),
                  'test_result')
      # Allow a 10% overshoot of the initialisation budget.
      assert mfgpb.spent_capital <= 1.1 * options.gpb_init_capital
      assert mfgpb.history.curr_opt_vals[-1] == mfgpb.gpb_opt_val
      assert is_non_decreasing_sequence(mfgpb.history.curr_opt_vals)
      assert num_hf_queries == 0 or hf_vals.max() == mfgpb.gpb_opt_val
      assert mfgpb.num_opt_fidel_queries == num_hf_queries
      assert mfgpb.history.query_at_opt_fidel.sum() == num_hf_queries

  def test_gpb_opt_1(self):
    """ Tests the optimisation routine on a sampled MFGP function,
    in both noiseless and noisy variants. """
    # pylint: disable=bad-whitespace
    self.report('Tests mf-gp-ucb using a sample from gen_mfgp_sample_as_mfof.')
    mfof = gen_simple_mfgp_as_mfof(random_seed=np.random.randint(1000))
    mfof.init_mfgp = mfof.mfgp
    # Also get the noisy mfof
    nmfof = get_noisy_mfof_from_mfof(mfof, mfof.mfgp.noise_var)
    method_data = [('gp_ucb', 20 * mfof.opt_fidel_cost),
                   ('gp_ei', 20 * mfof.opt_fidel_cost),
                   ('mf_gp_ucb', 20 * mfof.opt_fidel_cost)]
    for meth in method_data:
      opt_pt, opt_val, _ = mf_gp_bandit.mfgpb_from_mfoptfunc(mfof, meth[1], acq=meth[0],
                                                             reporter='silent')
      report_str = ('%s:: capital: %0.1f, opt_pt: %s, opt_val: %0.4f, ' +
                    'true_opt: %s, true opt_val: %0.4f')%(meth[0], meth[1], str(opt_pt),
                    opt_val, str(mfof.opt_pt), mfof.opt_val)
      self.report(report_str, 'test_result')
      # Now do the noisy version
      noisy_opt_pt, noisy_opt_val, _ = mf_gp_bandit.mfgpb_from_mfoptfunc(nmfof, meth[1],
                                         acq=meth[0], reporter='silent')
      # Evaluate the noiseless function at the point the noisy run returned.
      if noisy_opt_val < np.inf:
        fval_at_noisy_opt_pt = mfof.eval_single(mfof.opt_fidel, noisy_opt_pt)
      else:
        fval_at_noisy_opt_pt = np.inf
      noisy_report_str = ('Noisy %s:: noise: %0.4f, opt_pt: %s, opt_val: %0.4f, ' +
                          'fval_at_noisy_opt_pt: %0.4f')%(meth[0], nmfof.noise_var,
                          str(noisy_opt_pt), noisy_opt_val, fval_at_noisy_opt_pt)
      self.report(noisy_report_str, 'test_result')

  def test_gpb_opt_2(self):
    """ Tests the optimisation routine on the Hartmann function while
    learning the kernel, in both noiseless and noisy variants. """
    # pylint: disable=bad-whitespace
    self.report('Tests mf-gp-ucb using the hartmann function while learning kernel.')
    mfof = get_mf_hartmann_as_mfof(2, 3)
    noise_var = 0.1
    nmfof = get_noisy_mfof_from_mfof(mfof, noise_var)
    method_data = [('gp_ucb', 20 * mfof.opt_fidel_cost),
                   ('gp_ei', 20 * mfof.opt_fidel_cost),
                   ('mf_gp_ucb', 20 * mfof.opt_fidel_cost)]
    for meth in method_data:
      _, opt_val, history = mf_gp_bandit.mfgpb_from_mfoptfunc(mfof, meth[1],
                              acq=meth[0], reporter='silent')
      num_opt_fidel_queries = history.query_at_opt_fidel.sum()
      total_num_queries = len(history.query_at_opt_fidel)
      report_str = ('%s:: capital: %0.4f, opt_val: %0.4f, true opt_val: %0.4f, ' +
                    'queries(at-opt_fidel): %d(%d).')%(meth[0], meth[1], opt_val,
                    mfof.opt_val, total_num_queries, num_opt_fidel_queries)
      self.report(report_str, 'test_result')
      # Now do the noisy version
      noisy_opt_pt, noisy_opt_val, noisy_history = mf_gp_bandit.mfgpb_from_mfoptfunc(
        nmfof, meth[1], acq=meth[0], reporter='silent')
      noisy_num_opt_fidel_queries = noisy_history.query_at_opt_fidel.sum()
      # NOTE(review): this reads `history` (the noiseless run) rather than
      # `noisy_history` -- possibly a copy-paste slip; confirm intent.
      noisy_total_num_queries = len(history.query_at_opt_fidel)
      if noisy_opt_val < np.inf:
        fval_at_noisy_opt_pt = mfof.eval_single(mfof.opt_fidel, noisy_opt_pt)
      else:
        fval_at_noisy_opt_pt = np.inf
      noisy_report_str = ('Noisy %s:: noise: %0.4f, opt_val: %0.4f, ' +
                          'fval_at_noisy_opt_pt: %0.4f, ' +
                          'queries(at-opt_fidel): %d(%d).')%(meth[0], nmfof.noise_var,
                          noisy_opt_val, fval_at_noisy_opt_pt, noisy_total_num_queries,
                          noisy_num_opt_fidel_queries)
      self.report(noisy_report_str, 'test_result')
if __name__ == '__main__':
  # run every test case defined in this module
  execute_tests()
| 6,459 | 43.551724 | 88 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/demo_mf_gp_bandit.py
|
"""
A Demo for MF-GP-Bandit
"""
# pylint: disable=invalid-name
# Local
from synthetic_functions import get_mf_hartmann_as_mfof
from mf_gp_bandit import mfgpb_from_mfoptfunc
# methods = ['gp_ucb', 'gp_ei', 'mf_gp_ucb']
methods = ['mf_gp_ucb']  # acquisition strategies to demo

fidel_dim = 2  # dimensionality of the fidelity space
domain_dim = 6  # dimensionality of the optimisation domain
num_max_hf_queries = 200  # budget, in units of highest-fidelity queries

# smaller alternative configuration:
# fidel_dim = 1
# domain_dim = 3
# num_max_hf_queries = 100
def main():
  """ Entry point: runs every method in `methods` on the MF Hartmann problem. """
  problem = get_mf_hartmann_as_mfof(fidel_dim, domain_dim)
  # Budget expressed as the cost of num_max_hf_queries highest-fidelity queries.
  budget = num_max_hf_queries * problem.opt_fidel_cost
  header_fmt = 'Method:: %s, opt-val: %0.4f\n============================================='
  for method_name in methods:
    print(header_fmt%(method_name, problem.opt_val))
    mfgpb_from_mfoptfunc(problem, budget, method_name)
if __name__ == '__main__':
main()
| 761 | 20.771429 | 88 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/mf_gpb_utils.py
|
"""
A collection of utilities for MF-GP Bandits.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
# pylint: disable=no-name-in-module
from argparse import Namespace
from copy import deepcopy
import numpy as np
from scipy.stats import norm
from scratch.get_finite_fidel_mfof import mf_sko_fidel_chooser_single
def latin_hc_indices(dim, num_samples):
  """ Generates cell indices for Latin hyper-cube sampling.

  Returns a list of num_samples index vectors of length dim; along each
  dimension every index in 0..num_samples-1 is used exactly once.
  """
  pools = [list(range(num_samples)) for _ in range(dim)]
  lhs_indices = []
  for step in range(num_samples):
    # Pick one still-unused stratum per dimension, then remove it from its pool.
    positions = np.random.randint(num_samples - step, size=dim)
    picked = [pools[d][positions[d]] for d in range(dim)]
    pools = [pools[d][:positions[d]] + pools[d][positions[d] + 1:]
             for d in range(dim)]
    lhs_indices.append(picked)
  return lhs_indices

def latin_hc_sampling(dim, num_samples):
  """ Draws num_samples Latin hyper-cube samples from the unit cube [0, 1]^dim. """
  if num_samples == 0:
    return np.zeros((0, dim))
  elif num_samples == 1:
    return 0.5 * np.ones((1, dim))
  # Lower boundaries of the num_samples equal-width strata along one axis.
  strata_lo = (np.linspace(0, 1, num_samples + 1)[:num_samples]).reshape(1, -1)
  stratum_width = strata_lo[0][1] - strata_lo[0][0]
  strata_lo = np.repeat(strata_lo, dim, axis=0).T
  cell_indices = latin_hc_indices(dim, num_samples)
  lower_corners = np.array(
      [[strata_lo[cell_indices[i][j]][j] for j in range(dim)]
       for i in range(num_samples)])
  # Uniform jitter inside each selected cell.
  jitter = stratum_width * np.random.random((num_samples, dim))
  return lower_corners + jitter
def is_an_opt_fidel_query(query_fidel, opt_fidel):
  """ Returns whether a single query fidelity equals opt_fidel (tol 1e-5). """
  distance = np.linalg.norm(query_fidel - opt_fidel)
  return distance < 1e-5

def are_opt_fidel_queries(query_fidels, opt_fidel):
  """ Vectorised version: one boolean per row of query_fidels. """
  flags = [is_an_opt_fidel_query(row, opt_fidel) for row in query_fidels]
  return np.array(flags)
# Functions for acqusitions. ------------------------------------------------------------
def _mf_gp_ucb_single(dom_pt, mfgp, opt_fidel, time_step):
  """ MF-GP-UCB acquisition at a single domain point.

  dom_pt: the point at which to evaluate the acquisition.
  mfgp: an MFGP object.  opt_fidel: fidelity at which optimisation occurs.
  time_step: the current time step (enters the confidence width).
  Returns (ucb_value, beta_th).
  """
  ucb_vals, beta_th = _mf_gp_ucb_multiple(dom_pt.reshape(1, -1), mfgp, opt_fidel,
                                          time_step)
  return float(ucb_vals), beta_th

def _mf_gp_ucb_multiple(dom_pts, mfgp, opt_fidel, time_step):
  """ MF-GP-UCB acquisition at several domain points; returns (ucbs, beta_th). """
  norm_bound = mfgp.domain_kernel.get_effective_norm(np.ones(mfgp.domain_dim), order=1)
  eff_l1_boundary = max(10, norm_bound)
  # Confidence width grows logarithmically with time; clipped for stability.
  beta_t = 0.5 * mfgp.domain_dim * np.log(2 * time_step * eff_l1_boundary + 1)
  beta_th = np.clip(np.sqrt(beta_t), 3, 20)
  fidels_at_opt = np.repeat(opt_fidel.reshape(1, -1), len(dom_pts), axis=0)
  mu, sigma = mfgp.eval_at_fidel(fidels_at_opt, dom_pts, uncert_form='std')
  return mu + beta_th * sigma, beta_th

def _mf_gp_ucb(acq_query_type, dom_pts, mfgp, opt_fidel, time_step):
  """ Dispatches to the single- or multiple-point MF-GP-UCB implementation. """
  dispatch = {'single': _mf_gp_ucb_single, 'multiple': _mf_gp_ucb_multiple}
  if acq_query_type not in dispatch:
    raise ValueError('acq_query_type should be \'single\' or \'multiple\'. Given, '
                     '\'%s\' unrecognised.'%(acq_query_type))
  return dispatch[acq_query_type](dom_pts, mfgp, opt_fidel, time_step)
def _gp_ei_single(dom_pt, mfgp, opt_fidel, curr_best):
  """ GP-EI acquisition at a single domain point. """
  ei_vals = _gp_ei_multiple(dom_pt.reshape(1, -1), mfgp, opt_fidel, curr_best)
  return float(ei_vals)

def _gp_ei_multiple(dom_pts, mfgp, opt_fidel, curr_best):
  """ GP-EI acquisition at several domain points (expected improvement over
      curr_best, posterior taken at opt_fidel). """
  # pylint: disable=unused-argument
  fidels_at_opt = np.repeat(opt_fidel.reshape(1, -1), len(dom_pts), axis=0)
  mu, sigma = mfgp.eval_at_fidel(fidels_at_opt, dom_pts, uncert_form='std')
  improvement = mu - curr_best
  z_scores = improvement / sigma
  return improvement * norm.cdf(z_scores) + sigma * norm.pdf(z_scores)

def _gp_ei(acq_query_type, dom_pts, mfgp, opt_fidel, curr_best):
  """ Dispatches to the single- or multiple-point GP-EI implementation. """
  if acq_query_type == 'single':
    return _gp_ei_single(dom_pts, mfgp, opt_fidel, curr_best)
  if acq_query_type == 'multiple':
    return _gp_ei_multiple(dom_pts, mfgp, opt_fidel, curr_best)
  raise ValueError('acq_query_type should be \'single\' or \'multiple\'. Given, '
                   '\'%s\' unrecognised.'%(acq_query_type))
# Dispatch table mapping acquisition names (as used in options.acq) to the
# acquisition functions defined above. GP-UCB reuses the MF-GP-UCB functions;
# the two differ only in how the query fidelity is chosen (see
# fidelity_choosers below).
acquisitions = Namespace(
  # MF-GP-UCB
  mf_gp_ucb=_mf_gp_ucb,
  mf_gp_ucb_single=_mf_gp_ucb_single,
  mf_gp_ucb_multiple=_mf_gp_ucb_multiple,
  # GP-UCB
  gp_ucb=_mf_gp_ucb, # The acquisitions for gp-ucb are the same
  gp_ucb_single=_mf_gp_ucb_single,
  gp_ucb_multiple=_mf_gp_ucb_multiple,
  # GP-EI
  gp_ei=_gp_ei,
  gp_ei_single=_gp_ei_single,
  gp_ei_multiple=_gp_ei_multiple,
  )
# Functions for determining next fidel. --------------------------------------------------
def _mf_gp_ucb_fidel_chooser_single(next_pt, mfgp, mfof, acq_params):
  """ Determines the next fidelity to query at for MF-GP-UCB.

  next_pt: the domain point that will be evaluated next.
  mfgp: an MFGP object (current posterior).
  mfof: an MFOptFunction object (the problem being optimised).
  acq_params: must provide thresh_coeff and beta_th (from the UCB step).

  Candidates are kept if their posterior std at next_pt exceeds a cost-scaled
  threshold AND they are far enough from opt_fidel; the cheapest survivor is
  returned, else opt_fidel itself.
  """
  # pylint: disable=too-many-locals
  cand_fidels = mfof.get_candidate_fidelities(filter_by_cost=True)
  num_cand_fidels = len(cand_fidels)
  cand_fidel_cost_ratios = mfof.get_cost_ratio(cand_fidels)
  opt_fidel_mat = np.repeat(mfof.opt_fidel.reshape(1, -1), num_cand_fidels, axis=0)
  cand_fidel_slacks = mfgp.fidel_kernel.compute_std_slack(opt_fidel_mat, cand_fidels)
  # NOTE(review): cand_fidel_diffs is only referenced by the commented-out
  # alternative below — currently unused.
  cand_fidel_diffs = np.linalg.norm(opt_fidel_mat - cand_fidels, axis=1)
  # Only select points with high standard deviation
  next_pt_mat = np.repeat(next_pt.reshape(1, -1), num_cand_fidels, axis=0)
  _, cand_fidel_stds = mfgp.eval_at_fidel(cand_fidels, next_pt_mat, uncert_form='std')
  cost_ratio_power = 1/float(mfgp.fidel_dim + mfgp.domain_dim + 2)
  std_thresholds = acq_params.thresh_coeff * ((cand_fidel_cost_ratios ** cost_ratio_power)
                                              * cand_fidel_slacks)
  high_std_idxs = cand_fidel_stds > std_thresholds
  # Only select points that are far enough from opt_fidel
  eps_t = np.clip(1/acq_params.beta_th, 0.001, 0.2)
  diam_slack = mfgp.fidel_kernel.compute_std_slack(np.zeros((1, mfgp.fidel_dim)),
                                                   np.ones((1, mfgp.fidel_dim)))
  far_away_idxs = cand_fidel_slacks > eps_t * diam_slack
  # print(far_away_idxs.mean())
  # far_away_idxs = cand_fidel_diffs > eps_t * np.sqrt(mfgp.fidel_dim)
  # Now filter: element-wise AND of the two boolean masks.
  sel_idxs = high_std_idxs * far_away_idxs
  if sel_idxs.sum() == 0:
    # Nothing is both informative and far enough: query at the opt fidelity.
    return deepcopy(mfof.opt_fidel)
  else:
    sel_fidels = cand_fidels[sel_idxs]
    sel_cost_ratios = cand_fidel_cost_ratios[sel_idxs]
    min_cost_idx = sel_cost_ratios.argmin()
    next_fidel = sel_fidels[min_cost_idx]
    return next_fidel
def _opt_fidel_chooser_single(next_pt, mfgp, mfof, acq_params):
  """ Fidelity chooser for single-fidelity strategies (GP-UCB / GP-EI):
      always returns a copy of the optimisation fidelity mfof.opt_fidel.
  """
  # pylint: disable=unused-argument
  return deepcopy(mfof.opt_fidel)
# Dispatch table mapping acquisition names to the function that picks the
# fidelity of the next query. Single-fidelity methods always query opt_fidel.
fidelity_choosers = Namespace(
  # MF-GP-UCB
  mf_gp_ucb=_mf_gp_ucb_fidel_chooser_single,
  mf_gp_ucb_single=_mf_gp_ucb_fidel_chooser_single,
  # GP-UCB
  gp_ucb=_opt_fidel_chooser_single,
  gp_ucb_single=_opt_fidel_chooser_single,
  # GP-EI
  gp_ei=_opt_fidel_chooser_single,
  gp_ei_single=_opt_fidel_chooser_single,
  # MF-SKO
  mf_sko=mf_sko_fidel_chooser_single,
  mf_sko_single=mf_sko_fidel_chooser_single,
  )
| 8,112 | 40.182741 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/mf_gp.py
|
"""
Implements the kernel, GP and fitter for multi-fidelity GPs.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
import numpy as np
# Local imports
from gp.kernel import CoordinateProductKernel, PolyKernel, SEKernel
from gp.gp_core import GP, GPFitter, mandatory_gp_args
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter
from utils.ancillary_utils import get_list_of_floats_as_str
# Define hyper-parameters for Multi-fidelity GPs.
mf_gp_args = [
# Fidelity kernel
get_option_specs('fidel_kernel_type', False, 'se',
'Type of kernel for the fidelity space. Should be se or poly'),
get_option_specs('fidel_use_same_bandwidth', False, False,
('If true, will use same bandwidth on all fidelity dimensions. Applies only when '
'fidel_kernel_type is se. Default=False.')),
get_option_specs('fidel_use_same_scalings', False, False,
('If true, will use same scaling on all fidelity dimensions. Applies only when '
'fidel_kernel_type is poly. Default=False.')),
get_option_specs('fidel_poly_order', False, 1,
('Order of the polynomial for the fidelity kernel. Default = 1 (linear kernel)')),
# Domain kernel
get_option_specs('domain_kernel_type', False, 'se',
'Type of kernel for the domain. Should be se or poly'),
get_option_specs('domain_use_same_bandwidth', False, False,
('If true, will use same bandwidth on all domain dimensions. Applies only when '
'domain_kernel_type is se. Default=False.')),
get_option_specs('domain_use_same_scalings', False, False,
('If true, will use same scaling on all domain dimensions. Applies only when '
'domain_kernel_type is poly. Default=False.')),
get_option_specs('domain_poly_order', False, 1,
('Order of the polynomial for the domain kernel. Default = 1 (linear kernel)')),
# Mean function
get_option_specs('mean_func_type', False, 'median',
('Specify the type of mean function. Should be upper_bound, mean, median, const ',
'or zero. If const, specifcy value in mean-func-const.')),
get_option_specs('mean_func_const', False, 0.0,
'The constant value to use if mean_func_type is const.'),
# Kernel scale
get_option_specs('kernel_scale_type', False, 'tune',
('Specify how to obtain the kernel scale. Should be tune, label or value. Specify '
'appropriate value in kernel_scale_label or kernel_scale_value')),
get_option_specs('kernel_scale_label', False, 2,
'The fraction of label variance to use as noise variance.'),
get_option_specs('kernel_scale_value', False, 1,
'The (absolute) value to use as noise variance.'),
# Noise variance
get_option_specs('noise_var_type', False, 'tune',
('Specify how to obtain the noise variance. Should be tune, label or value. Specify '
'appropriate value in noise_var_label or noise_var_value')),
get_option_specs('noise_var_label', False, 0.05,
'The fraction of label variance to use as noise variance.'),
get_option_specs('noise_var_value', False, 0.1,
'The (absolute) value to use as noise variance.'),
]
# Define this which includes the mandatory GP args
all_mf_gp_args = mandatory_gp_args + mf_gp_args
class MFGP(GP):
""" A GP to be used for multi-fidelity optimisation. """
  def __init__(self, ZX, YY, fidel_coords, domain_coords,
               kernel_scale, fidel_kernel, domain_kernel,
               mean_func, noise_var, *args, **kwargs):
    """ Constructor.

    ZX: training inputs in the joint (fidelity x domain) space; YY: labels.
    fidel_coords / domain_coords: column indices of ZX belonging to the
    fidelity and domain sub-spaces respectively.
    The overall kernel is a coordinate-product kernel:
    kernel_scale * fidel_kernel(z, z') * domain_kernel(x, x').
    """
    self.fidel_coords = fidel_coords
    self.domain_coords = domain_coords
    self.fidel_dim = len(fidel_coords)
    self.domain_dim = len(domain_coords)
    self.fidel_kernel = fidel_kernel
    self.domain_kernel = domain_kernel
    # Construct coordinate product kernel
    mf_kernel = CoordinateProductKernel(self.fidel_dim + self.domain_dim, kernel_scale,
                                        [self.fidel_kernel, self.domain_kernel],
                                        [self.fidel_coords, self.domain_coords],
                                        )
    # Call super constructor (GP handles posterior construction, eval, etc.)
    super(MFGP, self).__init__(ZX, YY, mf_kernel, mean_func, noise_var, *args, **kwargs)
def get_domain_pts(self, data_idxs=None):
""" Returns only the domain points. """
data_idxs = data_idxs if not data_idxs is None else range(self.num_tr_data)
return self.ZX[data_idxs, self.domain_coords]
def get_fidel_pts(self, data_idxs=None):
""" Returns only the fidelity points. """
data_idxs = data_idxs if not data_idxs is None else range(self.num_tr_data)
return self.ZX[data_idxs, self.fidel_coords]
def _get_ZX_from_ZZ_XX(self, ZZ, XX):
""" Gets the coordinates in the joint space from the individual fidelity and
domain spaces. """
if ZZ.shape[1] != self.fidel_dim or XX.shape[1] != self.domain_dim:
raise ValueError('ZZ, XX dimensions should be (%d, %d). Given (%d, %d)'%(
self.fidel_dim, self.domain_dim, ZZ.shape[1], XX.shape[1]))
ZX_unordered = np.concatenate((ZZ, XX), axis=1)
ordering = np.argsort(self.fidel_coords + self.domain_coords)
return ZX_unordered[:, ordering]
def eval_at_fidel(self, ZZ_test, XX_test, *args, **kwargs):
""" Evaluates the GP at [ZZ, XX]. Read eval in gp_core.GP for more details. """
ZX_test = self._get_ZX_from_ZZ_XX(ZZ_test, XX_test)
return self.eval(ZX_test, *args, **kwargs)
def add_mf_data(self, ZZ_new, XX_new, YY_new, *args, **kwargs):
""" Adds new data to the multi-fidelity GP. """
ZX_new = self._get_ZX_from_ZZ_XX(ZZ_new, XX_new)
self.add_data(ZX_new, YY_new, *args, **kwargs)
def draw_mf_samples(self, num_samples, ZZ_test=None, XX_test=None, *args, **kwargs):
""" Draws samples from a multi-fidelity GP. """
ZX_test = None if ZZ_test is None else self._get_ZX_from_ZZ_XX(ZZ_test, XX_test)
return self.draw_samples(num_samples, ZX_test, *args, **kwargs)
def __str__(self):
""" Returns a string representation of the MF-GP. """
fidel_ke_str = self._get_kernel_str(self.fidel_kernel)
domain_ke_str = self._get_kernel_str(self.domain_kernel)
ret = 'noise: %0.4f, scale: %0.3f, fid: %s, dom: %s'%(self.noise_var,
self.kernel.scale, fidel_ke_str, domain_ke_str)
return ret
@classmethod
def _get_kernel_str(cls, kern):
""" Gets a string format of the kernel depending on whether it is SE/Poly."""
if isinstance(kern, SEKernel):
hp_name = 'dim_bandwidths'
kern_name = 'se'
elif isinstance(kern, PolyKernel):
hp_name = 'dim_scalings'
kern_name = 'poly'
if kern.dim > 4:
ret = '%0.4f(avg)'%(kern.hyperparams[hp_name].mean())
else:
ret = get_list_of_floats_as_str(kern.hyperparams[hp_name])
ret = kern_name + '-' + ret
return ret
def get_mfgp_from_fidel_domain(ZZ, XX, YY, kernel_scale, fidel_kernel,
                               domain_kernel, mean_func, noise_var, *args, **kwargs):
  # pylint: disable=too-many-locals
  """ A wrapper which concatenates the ZZ and XX and returns an MFGP object.

  Fidelity columns come first in ZX, followed by the domain columns.
  """
  fidel_dim = ZZ.shape[1]
  domain_dim = XX.shape[1]
  # Materialise as lists: MFGP concatenates these coordinate collections and
  # range + range is a TypeError on Python 3.
  fidel_coords = list(range(fidel_dim))
  domain_coords = list(range(fidel_dim, fidel_dim + domain_dim))
  ZX = np.concatenate((ZZ, XX), axis=1)
  return MFGP(ZX, YY, fidel_coords, domain_coords,
              kernel_scale, fidel_kernel, domain_kernel,
              mean_func, noise_var, *args, **kwargs)
class MFGPFitter(GPFitter):
""" A fitter for GPs in multi-fidelity optimisation. """
# pylint: disable=attribute-defined-outside-init
  def __init__(self, ZZ, XX, YY, options=None, reporter=None):
    """ Constructor. options should either be a Namespace, a list or None.

    ZZ: fidelity coordinates, XX: domain coordinates, YY: labels. The joint
    input matrix ZX is formed by concatenating [ZZ, XX] column-wise.
    """
    reporter = get_reporter(reporter)
    if options is None:
      options = load_options(all_mf_gp_args, 'MF-GP', reporter)
    self.ZZ = ZZ
    self.XX = XX
    self.YY = YY
    self.ZX = np.concatenate((self.ZZ, self.XX), axis=1)
    self.fidel_dim = self.ZZ.shape[1]
    self.domain_dim = self.XX.shape[1]
    self.input_dim = self.fidel_dim + self.domain_dim
    # GPFitter.__init__ invokes _child_set_up (defined below).
    super(MFGPFitter, self).__init__(options, reporter)
# Child Set up Methods
# ======================================================================================
def _child_set_up(self):
""" Sets parameters for GPFitter. """
# Check args - so that we don't have to keep doing this all the time
if not self.options.fidel_kernel_type in ['se', 'poly']:
raise ValueError('Unknown fidel_kernel_type. Should be either se or poly.')
if not self.options.domain_kernel_type in ['se', 'poly']:
raise ValueError('Unknown domain_kernel_type. Should be either se or poly.')
if not self.options.noise_var_type in ['tune', 'label', 'value']:
raise ValueError('Unknown noise_var_type. Should be either tune, label or value.')
if not self.options.mean_func_type in ['mean', 'median', 'const', 'zero',
'upper_bound']:
raise ValueError(('Unknown mean_func_type. Should be one of ',
'mean/median/const/zero.'))
# Set some parameters we will be using often.
self.Y_var = self.YY.std()**2
self.ZX_std_norm = np.linalg.norm(self.ZX.std(axis=0))
# Bounds for the hyper parameters
# -------------------------------
self.hp_bounds = []
# Noise variance
if self.options.noise_var_type == 'tune':
self.noise_var_log_bounds = [np.log(0.005 * self.Y_var), np.log(0.2 * self.Y_var)]
self.hp_bounds.append(self.noise_var_log_bounds)
# Kernel scale
self.scale_log_bounds = [np.log(0.1 * self.Y_var), np.log(10 * self.Y_var)]
self.hp_bounds.append(self.scale_log_bounds)
# Fidelity kernel
if self.options.fidel_kernel_type == 'se':
self._fidel_se_kernel_setup()
elif self.options.fidel_kernel_type == 'poly':
self._fidel_poly_kernel_setup()
# Domain kernel
if self.options.domain_kernel_type == 'se':
self._domain_se_kernel_setup()
elif self.options.domain_kernel_type == 'poly':
self._domain_pol_kernel_setup()
  def _fidel_se_kernel_setup(self):
    """ Sets up log-bandwidth bounds for an SE kernel on the fidelity space.

    User-supplied bounds in options take precedence; otherwise defaults are
    derived from the spread (std norm) of the training inputs.
    """
    if (hasattr(self.options, 'fidel_bandwidth_log_bounds') and
        self.options.fidel_bandwidth_log_bounds is not None):
      self.fidel_bandwidth_log_bounds = self.options.fidel_bandwidth_log_bounds
    else:
      self.fidel_bandwidth_log_bounds = self._get_se_kernel_bounds(
        self.fidel_dim, self.ZX_std_norm, self.options.fidel_use_same_bandwidth)
    self.hp_bounds.extend(self.fidel_bandwidth_log_bounds)
  def _fidel_poly_kernel_setup(self):
    """ Sets up scaling bounds for a polynomial kernel on the fidelity space. """
    self.fidel_scaling_log_bounds = self._get_poly_kernel_bounds(self.ZX,
                                      self.options.fidel_use_same_scalings)
    self.hp_bounds.extend(self.fidel_scaling_log_bounds)
  def _domain_se_kernel_setup(self):
    """ Sets up log-bandwidth bounds for an SE kernel on the domain space.

    User-supplied bounds in options take precedence; otherwise defaults are
    derived from the spread (std norm) of the training inputs.
    """
    if (hasattr(self.options, 'domain_bandwidth_log_bounds') and
        self.options.domain_bandwidth_log_bounds is not None):
      self.domain_bandwidth_log_bounds = self.options.domain_bandwidth_log_bounds
    else:
      self.domain_bandwidth_log_bounds = self._get_se_kernel_bounds(
        self.domain_dim, self.ZX_std_norm, self.options.domain_use_same_bandwidth)
    self.hp_bounds.extend(self.domain_bandwidth_log_bounds)
  def _domain_poly_kernel_setup(self):
    """ Sets up scaling bounds for a polynomial kernel on the domain space. """
    self.domain_scaling_log_bounds = self._get_poly_kernel_bounds(self.ZX,
                                       self.options.domain_use_same_scalings)
    self.hp_bounds.extend(self.domain_scaling_log_bounds)
@classmethod
def _get_se_kernel_bounds(cls, dim, single_bw_bounds, use_same_bandwidth):
""" Gets bandwidths for the SE kernel. """
if isinstance(single_bw_bounds, float) or isinstance(single_bw_bounds, int):
single_bw_bounds = [0.01*single_bw_bounds, 10*single_bw_bounds]
single_bandwidth_log_bounds = [np.log(x) for x in single_bw_bounds]
bandwidth_log_bounds = ([single_bandwidth_log_bounds] if use_same_bandwidth
else [single_bandwidth_log_bounds] * dim)
return bandwidth_log_bounds
def _get_poly_kernel_bounds(self, data, use_same_scalings):
""" Gets bandwidths for the Polynomial kerne. """
# TODO: implement poly kernel
raise NotImplementedError('Yet to implement polynomial kernel.')
# _child_set_up methods end here -------------------------------------------------------
# build_gp Methods
# ======================================================================================
def _child_build_gp(self, gp_hyperparams):
""" Builds a Multi-fidelity GP from the hyper-parameters. """
# pylint: disable=too-many-branches
# Noise variance ------------------------------------
if self.options.noise_var_type == 'tune':
noise_var = np.exp(gp_hyperparams[0])
gp_hyperparams = gp_hyperparams[1:]
elif self.options.noise_var_type == 'label':
noise_var = self.options.noise_var_label * (self.Y.std()**2)
else:
noise_var = self.options.noise_var_value
# Mean function -------------------------------------
if hasattr(self.options, 'mean_func') and self.options.mean_func is not None:
mean_func = self.options.mean_func
else:
if self.options.mean_func_type == 'mean':
mean_func_const_value = self.YY.mean()
elif self.options.mean_func_type == 'median':
mean_func_const_value = np.median(self.YY)
elif self.options.mean_func_type == 'upper_bound':
mean_func_const_value = np.mean(self.YY) + 3 * np.std(self.YY)
elif self.options.mean_func_type == 'const':
mean_func_const_value = self.options.mean_func_const
else:
mean_func_const_value = 0
mean_func = lambda x: np.array([mean_func_const_value] * len(x))
# TODO: The noise and mean parts are reusing a lot of code from
# gp_instances.SimpleGPFitter. Think of merging these two.
# Kernel scale ---------------------------------------
ke_scale = np.exp(gp_hyperparams[0])
gp_hyperparams = gp_hyperparams[1:]
# Fidelity kernel ------------------------------------
if self.options.fidel_kernel_type == 'se':
fidel_kernel, gp_hyperparams = self._get_se_kernel(self.fidel_dim,
gp_hyperparams, self.options.fidel_use_same_bandwidth)
elif self.options.fidel_kernel_type == 'poly':
fidel_kernel, gp_hyperparams = self._get_poly_kernel(self.fidel_dim,
self.options.fidel_poly_order, gp_hyperparams,
self.options.fidel_use_same_scalings)
# Domain kernel --------------------------------------
if self.options.domain_kernel_type == 'se':
domain_kernel, gp_hyperparams = self._get_se_kernel(self.domain_dim,
gp_hyperparams, self.options.domain_use_same_bandwidth)
elif self.options.domain_kernel_type == 'poly':
domain_kernel, gp_hyperparams = self._get_poly_kernel(self.domain_dim,
self.options.domain_poly_order, gp_hyperparams,
self.options.domain_use_same_scalings)
# Construct and return MF GP
return MFGP(self.ZX, self.YY, range(self.fidel_dim),
range(self.fidel_dim, self.domain_dim + self.fidel_dim),
ke_scale, fidel_kernel, domain_kernel, mean_func, noise_var,
reporter=self.reporter)
@classmethod
def _get_se_kernel(cls, dim, gp_hyperparams, use_same_bandwidth):
""" Builds a squared exponential kernel. """
if use_same_bandwidth:
ke_dim_bandwidths = [np.exp(gp_hyperparams[0])] * dim
gp_hyperparams = gp_hyperparams[1:]
else:
ke_dim_bandwidths = np.exp(gp_hyperparams[0:dim])
gp_hyperparams = gp_hyperparams[dim:]
kernel = SEKernel(dim=dim, scale=1, dim_bandwidths=ke_dim_bandwidths)
return kernel, gp_hyperparams
@classmethod
def _get_poly_kernel(cls, dim, order, gp_hyperparams, use_same_scalings):
""" Builds a polynomial kernel. """
if use_same_scalings:
ke_dim_scalings = [np.exp(gp_hyperparams[0])] * dim
gp_hyperparams = gp_hyperparams[1:]
else:
ke_dim_scalings = np.exp(gp_hyperparams[0:dim])
gp_hyperparams = gp_hyperparams[dim:]
kernel = PolyKernel(dim=dim, order=order, scale=1, dim_scalings=ke_dim_scalings)
return kernel, gp_hyperparams
# _child_build_gp methods end here -----------------------------------------------------
| 16,675 | 45.974648 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/gen_mfgp_sample.py
|
"""
Used to generate a sample from an MFGP sample.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=too-many-locals
# pylint: disable=no-name-in-module
# pylint: disable=superfluous-parens
import numpy as np
from scipy.interpolate import RectBivariateSpline
# Local imports
from gp.kernel import SEKernel
import mf_func
import mf_gp
from utils.ancillary_utils import plot_2d_function
num_per_dim = 50
spline_degree = 3
def gen_mfgp_sample_as_mfof(mfgp, fidel_cost_func, random_seed):
  """ Draws one sample path from mfgp and wraps it as an MFOptFunction.

  The sample is drawn on a regular grid and smoothed with a bivariate spline
  so it can be evaluated anywhere on [0,1] x [0,1]. Only 1-d fidelity and
  1-d domain are supported. The global numpy RNG state is saved and restored,
  so the draw is reproducible via random_seed without perturbing callers.
  """
  if mfgp.fidel_dim != 1 or mfgp.domain_dim != 1:
    raise NotImplementedError('Only implemented 1 dimensional fidel/domain so far!')
  # Get/set the random state.
  st0 = np.random.get_state()
  np.random.seed(random_seed)
  # Set some attributes up
  fidel_bounds = [[0, 1]] * mfgp.fidel_dim
  domain_bounds = [[0, 1]] * mfgp.domain_dim
  opt_fidel = np.array([1])
  # This part of the code relies on dim_z = dim_x = 1
  # Create a grid for interpolation
  dim_grid = np.linspace(0, 1, num_per_dim)
  ZZ, XX = np.meshgrid(dim_grid, dim_grid)
  grid_pts = np.concatenate((ZZ.reshape(-1, 1), XX.reshape(-1, 1)), axis=1)
  grid_samples = mfgp.draw_samples(1, grid_pts).ravel()
  grid_samples_as_grid = grid_samples.reshape((num_per_dim, num_per_dim))
  # With the meshgrid layout above, the first axis of grid_samples_as_grid
  # indexes x and the second indexes z; hence the spline is queried as
  # ev(x, z) inside g below.
  rbs = RectBivariateSpline(dim_grid, dim_grid, grid_samples_as_grid,
                            kx=spline_degree, ky=spline_degree)
  g = lambda z, x: rbs.ev(x, z)
  # compute optimum point via a dense grid search at the highest fidelity
  opt_search_grid_size = 1000
  opt_search_dom_grid = np.linspace(0, 1, opt_search_grid_size).reshape(-1, 1)
  opt_search_fidel_m = np.repeat(opt_fidel.reshape(-1, 1), opt_search_grid_size, axis=0)
  opt_fidel_grid_vals = g(opt_search_fidel_m, opt_search_dom_grid)
  opt_idx = opt_fidel_grid_vals.argmax()
  opt_pt = np.array(opt_search_dom_grid[opt_idx])
  opt_val = opt_fidel_grid_vals[opt_idx]
  mfof = mf_func.MFOptFunction(g, fidel_cost_func, fidel_bounds, domain_bounds,
                               opt_fidel, vectorised=True, opt_pt=opt_pt, opt_val=opt_val)
  mfof.mfgp = mfgp
  # before returning restate the np random state
  np.random.set_state(st0)
  return mfof
def gen_mfgp_sample_as_noisy_mfof(mfgp, fidel_cost_func, random_seed, noise_var):
  """ Same as gen_mfgp_sample_as_mfof, but wraps the sample with observation
      noise of variance noise_var. """
  clean_mfof = gen_mfgp_sample_as_mfof(mfgp, fidel_cost_func, random_seed)
  return mf_func.get_noisy_mfof_from_mfof(clean_mfof, noise_var)
def gen_simple_mfgp_as_mfof(fidel_bw=0.8, random_seed=512):
  """ Builds a simple 1-d/1-d MFGP prior and returns one sample from it,
      wrapped as an mfof. """
  # Prior GP: SE kernels on both the fidelity and the domain axes.
  prior_scale = 2
  fid_kernel = SEKernel(1, 1, [fidel_bw])
  dom_kernel = SEKernel(1, 1, [0.08])
  obs_noise = 0.1
  # No training data: the sample is drawn from the prior.
  empty_ZZ = np.zeros((0, 1))
  empty_XX = np.zeros((0, 1))
  empty_YY = np.zeros((0))
  zero_mean = lambda x: np.zeros((len(x)))
  prior_mfgp = mf_gp.get_mfgp_from_fidel_domain(empty_ZZ, empty_XX, empty_YY,
    prior_scale, fid_kernel, dom_kernel, zero_mean, obs_noise, build_posterior=True)
  # Querying higher fidelities costs more (quadratically).
  cost_of_fidel = lambda z: 0.2 + 6 * z ** 2
  return gen_mfgp_sample_as_mfof(prior_mfgp, cost_of_fidel, random_seed)
def visualise_mfof(mfof):
  """ Plots mfof over the (fidelity, domain) plane and marks the optimum. """
  plot_func = mfof.eval_multiple
  _, ax, plt = plot_2d_function(plot_func,
                                np.array([mfof.fidel_bounds[0], mfof.domain_bounds[0]]),
                                x_label='fidel', y_label='domain')
  # Highlight the optimiser (opt_fidel, opt_pt, opt_val) in red.
  ax.scatter(mfof.opt_fidel, mfof.opt_pt, mfof.opt_val, c='r', s=100)
  plt.show()
def main():
  """ Demo: draw a sample mfof and visualise it. The two prints bracket the
      call to show the global RNG stream is unaffected by the seeded draw. """
  print(np.random.random())
  sampled_mfof = gen_simple_mfgp_as_mfof()
  visualise_mfof(sampled_mfof)
  print(np.random.random())
if __name__ == '__main__':
main()
| 3,811 | 32.147826 | 89 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/mf_gp_bandit.py
|
"""
Implements Multi-fidelity GP Bandit Optimisaiton.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
from argparse import Namespace
from copy import deepcopy
import time
import numpy as np
# Local imports
from mf_func import MFOptFunction
from mf_gp import all_mf_gp_args, MFGPFitter
from mf_gpb_utils import acquisitions, fidelity_choosers
from mf_gpb_utils import is_an_opt_fidel_query, latin_hc_sampling
from utils.optimisers import direct_ft_maximise, random_maximise
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter
mf_gp_bandit_args = [
get_option_specs('capital_type', False, 'given',
('The type of capital to be used. If \'given\', it will use the cost specified. '
'Could be one of given, cputime, or realtime')),
get_option_specs('max_iters', False, 1e5,
'The maximum number of iterations, regardless of capital.'),
get_option_specs('gamma_0', False, '1',
('The multiplier in front of the default threshold value for switching. Should be',
'a scalar or the string \'adapt\'.')),
get_option_specs('acq', False, 'mf_gp_ucb',
'Which acquisition to use. Should be one of mf_gp_ucb, gp_ucb or gp_ei'),
get_option_specs('acq_opt_criterion', False, 'rand',
'Which optimiser to use when maximising the acquisition function.'),
get_option_specs('acq_opt_max_evals', False, -1,
'Number of evaluations when maximising acquisition. If negative uses default value.'),
get_option_specs('gpb_init', False, 'random_lower_fidels',
'How to initialise. Should be either random_lower_fidels or random.'),
get_option_specs('gpb_init_capital', False, -1.0,
('The amount of capital to be used for initialisation. If negative, will use',
'init_capital_frac fraction of the capital for optimisation.')),
get_option_specs('gpb_init_capital_frac', False, 0.1,
'The percentage of the capital to use for initialisation.'),
# The following are perhaps not so important.
get_option_specs('shrink_kernel_with_time', False, 1,
'If True, shrinks the kernel with time so that we don\'t get stuck.'),
get_option_specs('perturb_thresh', False, 1e-4,
('If the next point chosen is too close to an exisiting point by this times the '
'diameter, then we will perturb the point a little bit before querying. This is '
'mainly to avoid numerical stability issues.')),
get_option_specs('build_new_gp_every', False, 20,
'Updates the GP via a suitable procedure every this many iterations.'),
get_option_specs('report_results_every', False, 20,
'Report results every this many iterations.'),
get_option_specs('monitor_progress_every', False, 9,
('Performs some simple sanity checks to make sure we are not stuck every this many',
' iterations.')),
get_option_specs('monitor_domain_kernel_shrink', False, 0.9,
('If the optimum has not increased in a while, shrinks the kernel smoothness by this',
' much to increase variance.')),
get_option_specs('monitor_mf_thresh_increase', False, 1.5,
('If we have not queried at the highest fidelity in a while, increases the leading',
'constant by this much')),
get_option_specs('track_every_time_step', False, 0,
('If 1, it tracks every time step.')),
# TODO: implement code for next_pt_std_thresh
get_option_specs('next_pt_std_thresh', False, 0.005,
('If the std of the queried point queries below this times the kernel scale ',
'frequently we will reduce the bandwidth range')),
]
# All of them including what is needed for fitting GP.
all_mf_gp_bandit_args = all_mf_gp_args + mf_gp_bandit_args
# The MFGPBandit Class
# ========================================================================================
class MFGPBandit(object):
""" MFGPBandit Class. """
# pylint: disable=attribute-defined-outside-init
# Methods needed for construction -------------------------------------------------
  def __init__(self, mf_opt_func, options=None, reporter=None):
    """ Constructor.

    mf_opt_func: an MFOptFunction describing the multi-fidelity optimisation
    problem. options: a Namespace, a list, or None (None loads the defaults
    declared in all_mf_gp_bandit_args).
    """
    self.reporter = get_reporter(reporter)
    if options is None:
      options = load_options(all_mf_gp_bandit_args, reporter=reporter)
    self.options = options
    # Set up mfgp and mfof attributes
    self.mfof = mf_opt_func # mfof refers to an MFOptFunction object.
    self.mfgp = None  # The GP model; built later during optimisation.
    # Other set up
    self._set_up()
  def _set_up(self):
    """ Validates options and initialises book-keeping state and history. """
    # Check for legal parameter values
    self._check_options_vals('capital_type', ['given', 'cputime', 'realtime'])
    self._check_options_vals('acq', ['mf_gp_ucb', 'gp_ucb', 'gp_ei', 'mf_gp_ucb_finite',
                                     'mf_sko'])
    self._check_options_vals('acq_opt_criterion', ['rand', 'direct'])
    if isinstance(self.options.gpb_init, str):
      self._check_options_vals('gpb_init', ['random', 'random_lower_fidels'])
    # Set up some book keeping parameters
    self.available_capital = 0.0
    self.time_step = 0
    self.num_opt_fidel_queries = 0
    # Copy some stuff over from mfof
    copyable_params = ['fidel_dim', 'domain_dim']
    for param in copyable_params:
      setattr(self, param, getattr(self.mfof, param))
    # Set up acquisition optimisation
    self._set_up_acq_opt()
    # set up variables for monitoring
    self.monit_kernel_shrink_factor = 1
    self.monit_thresh_coeff = 1
    # Set initial (empty) history; one entry per completed query.
    self.history = Namespace(query_fidels=np.zeros((0, self.fidel_dim)),
                             query_points=np.zeros((0, self.domain_dim)),
                             query_vals=np.zeros(0),
                             query_costs=np.zeros(0),
                             curr_opt_vals=np.zeros(0),
                             query_at_opt_fidel=np.zeros(0).astype(bool),
                             )
@classmethod
def _check_arg_vals(cls, arg_val, arg_name, allowed_vals):
""" Checks if arg_val is in allowed_vals. """
if arg_val not in allowed_vals:
err_str = '%s should be one of %s.'%(arg_name,
' '.join([str(x) for x in allowed_vals]))
raise ValueError(err_str)
def _check_options_vals(self, option_name, allowed_vals):
""" Checks if the option option_name has taken a an allowed value. """
return self._check_arg_vals(getattr(self.options, option_name),
option_name, allowed_vals)
# Methods for setting up optimisation of acquisition ----------------------------------
def _set_up_acq_opt(self):
""" Sets up acquisition optimisation. """
# First set up function to get maximum evaluations.
if isinstance(self.options.acq_opt_max_evals, int):
if self.options.acq_opt_max_evals > 0:
self.get_acq_opt_max_evals = lambda t: self.options.acq_opt_max_evals
else:
self.get_acq_opt_max_evals = None
else:
# In this case, the user likely passed a function here.
self.get_acq_opt_max_evals = self.options.acq_opt_max_evals
# Now based on the optimisation criterion, do additional set up
if self.options.acq_opt_criterion == 'direct':
self._set_up_acq_opt_direct()
elif self.options.acq_opt_criterion == 'rand':
self._set_up_acq_opt_rand()
else:
raise NotImplementedError('Not implemented acq opt for %s yet!'%(
self.options.acq_opt_criterion))
def _set_up_acq_opt_direct(self):
""" Sets up acquisition optimisation with direct. """
def _direct_wrap(*args):
""" A wrapper so as to only return optimal value. """
_, opt_pt, _ = direct_ft_maximise(*args)
return opt_pt
direct_lower_bounds = [0] * self.domain_dim
direct_upper_bounds = [1] * self.domain_dim
self.acq_optimise = lambda obj, max_evals: _direct_wrap(obj,
direct_lower_bounds, direct_upper_bounds, max_evals)
# Set up function for obtaining number of function evaluations.
if self.get_acq_opt_max_evals is None:
lead_const = 15 * min(5, self.domain_dim)**2
self.get_acq_opt_max_evals = lambda t: lead_const * np.sqrt(min(t, 1000))
# Acquisition function should be evaluated via single evaluations.
self.acq_query_type = 'single'
def _set_up_acq_opt_rand(self):
""" Sets up acquisition optimisation with direct. """
def _random_max_wrap(*args):
""" A wrapper so as to only return optimal value. """
_, opt_pt = random_maximise(*args)
return opt_pt
rand_bounds = np.array([[0, 1]] * self.domain_dim)
self.acq_optimise = lambda obj, max_evals: _random_max_wrap(obj,
rand_bounds, max_evals)
if self.get_acq_opt_max_evals is None:
lead_const = 7 * min(5, self.domain_dim)**2
self.get_acq_opt_max_evals = lambda t: np.clip(
lead_const * np.sqrt(min(t, 1000)), 1000, 2e4)
# Acquisition function should be evaluated via multiple evaluations
self.acq_query_type = 'multiple'
# Book keeping methods ------------------------------------------------------------
def _update_history(self, pts_fidel, pts_domain, pts_val, pts_cost, at_opt_fidel):
""" Adds a query point to the history and discounts the capital etc. """
pts_fidel = pts_fidel.reshape(-1, self.fidel_dim)
pts_domain = pts_domain.reshape(-1, self.domain_dim)
pts_val = pts_val if hasattr(pts_val, '__len__') else [pts_val]
pts_cost = pts_cost if hasattr(pts_cost, '__len__') else [pts_cost]
# Append to history
self.history.query_fidels = np.append(self.history.query_fidels, pts_fidel, axis=0)
self.history.query_points = np.append(self.history.query_points, pts_domain, axis=0)
self.history.query_vals = np.append(self.history.query_vals, pts_val, axis=0)
self.history.query_costs = np.append(self.history.query_costs, pts_cost, axis=0)
self.history.curr_opt_vals = np.append(self.history.curr_opt_vals, self.gpb_opt_val)
self.history.query_at_opt_fidel = np.append(self.history.query_at_opt_fidel,
at_opt_fidel)
def _get_min_distance_to_opt_fidel(self):
""" Computes the minimum distance to the optimal fidelity. """
dists_to_of = np.linalg.norm(self.history.query_fidels - self.mfof.opt_fidel, axis=1)
return dists_to_of.min()
def _report_current_results(self):
""" Writes the current results to the reporter. """
cost_frac = self.spent_capital / self.available_capital
report_str = ' '.join(['%s-%03d::'%(self.options.acq, self.time_step),
'cost: %0.3f,'%(cost_frac),
'#hf_queries: %03d,'%(self.num_opt_fidel_queries),
'optval: %0.4f'%(self.gpb_opt_val)
])
if self.num_opt_fidel_queries == 0:
report_str = report_str + '. min-to-of: %0.4f'%(
self._get_min_distance_to_opt_fidel())
self.reporter.writeln(report_str)
# Methods for managing the GP -----------------------------------------------------
def _build_new_gp(self):
""" Builds the GP with the data in history and stores in self.mfgp. """
if hasattr(self.mfof, 'init_mfgp') and self.mfof.init_mfgp is not None:
self.mfgp = deepcopy(self.mfof.init_mfgp)
self.mfgp.add_mf_data(self.history.query_fidels, self.history.query_points,
self.history.query_vals)
mfgp_prefix_str = 'Using given gp: '
else:
# Set domain bandwidth bounds
if self.options.shrink_kernel_with_time:
bw_ub = max(0.2, 2/(1+self.time_step)**0.25)
domain_bw_log_bounds = [[0.05, bw_ub]] * self.domain_dim
self.options.domain_bandwidth_log_bounds = np.array(domain_bw_log_bounds)
else:
self.options.domain_bandwidth_log_bounds = np.array([[0, 4]] * self.domain_dim)
# Set fidelity bandwidth bounds
self.options.fidel_bandwidth_log_bounds = np.array([[0, 4]] * self.fidel_dim)
# Call the gp fitter
mfgp_fitter = MFGPFitter(self.history.query_fidels, self.history.query_points,
self.history.query_vals, options=self.options, reporter=self.reporter)
self.mfgp, _ = mfgp_fitter.fit_gp()
mfgp_prefix_str = 'Fitting GP (t=%d): '%(self.time_step) # increase bandwidths
mfgp_str = ' -- %s%s.'%(mfgp_prefix_str, str(self.mfgp))
self.reporter.writeln(mfgp_str)
def _add_data_to_mfgp(self, fidel_pt, domain_pt, val_pt):
""" Adds data to self.mfgp. """
self.mfgp.add_mf_data(fidel_pt.reshape((-1, self.fidel_dim)),
domain_pt.reshape((-1, self.domain_dim)),
np.array(val_pt).ravel())
# Methods needed for initialisation -----------------------------------------------
def perform_initial_queries(self):
""" Performs an initial set of queries to initialise optimisation. """
if not isinstance(self.options.gpb_init, str):
raise NotImplementedError('Not implemented taking given initialisation yet.')
# First determine the initial budget.
gpb_init_capital = (self.options.gpb_init_capital if self.options.gpb_init_capital > 0
else self.options.gpb_init_capital_frac * self.available_capital)
if self.options.acq in ['gp_ucb', 'gp_ei']:
num_sf_init_pts = np.ceil(float(gpb_init_capital)/self.mfof.opt_fidel_cost)
fidel_init_pts = np.repeat(self.mfof.opt_fidel.reshape(1, -1), num_sf_init_pts,
axis=0)
elif self.options.acq in ['mf_gp_ucb', 'mf_gp_ucb_finite', 'mf_sko']:
fidel_init_pts = self._mf_method_random_initial_fidels_random(gpb_init_capital)
num_init_pts = len(fidel_init_pts)
domain_init_pts = latin_hc_sampling(self.domain_dim, num_init_pts)
for i in range(num_init_pts):
self.query(fidel_init_pts[i], domain_init_pts[i])
if self.spent_capital >= gpb_init_capital:
break
self.reporter.writeln('Initialised %s with %d queries, %d at opt_fidel.'%(
self.options.acq, len(self.history.query_vals), self.num_opt_fidel_queries))
def _mf_method_random_initial_fidels_interweaved(self):
"""Gets initial fidelities for a multi-fidelity method. """
rand_fidels = self.mfof.get_candidate_fidelities()
np.random.shuffle(rand_fidels)
num_rand_fidels = len(rand_fidels)
opt_fidels = np.repeat(self.mfof.opt_fidel.reshape(1, -1), num_rand_fidels, axis=0)
fidel_init_pts = np.empty((2*num_rand_fidels, self.fidel_dim), dtype=np.float64)
fidel_init_pts[0::2] = rand_fidels
fidel_init_pts[1::2] = opt_fidels
return fidel_init_pts
def _mf_method_random_initial_fidels_random(self, gpb_init_capital):
"""Gets initial fidelities for a multi-fidelity method. """
cand_fidels = self.mfof.get_candidate_fidelities()
cand_costs = self.mfof.cost(cand_fidels)
not_too_expensive_fidel_idxs = cand_costs <= (gpb_init_capital / 3.0)
fidel_init_pts = cand_fidels[not_too_expensive_fidel_idxs, :]
np.random.shuffle(fidel_init_pts)
return np.array(fidel_init_pts)
def initialise_capital(self):
""" Initialises capital. """
self.spent_capital = 0.0
if self.options.capital_type == 'cputime':
self.cpu_time_stamp = time.clock()
elif self.options.capital_type == 'realtime':
self.real_time_stamp = time.time()
def optimise_initialise(self):
""" Initialisation for optimisation. """
self.gpb_opt_pt = None
self.gpb_opt_val = -np.inf
self.initialise_capital() # Initialise costs
self.perform_initial_queries() # perform initial queries
self._build_new_gp()
# Methods needed for monitoring -------------------------------------------------
def _monitor_progress(self):
""" Monitors progress. """
# self._monitor_opt_val()
self._monitor_opt_fidel_queries()
def _monitor_opt_val(self):
""" Monitors progress of the optimum value. """
# Is the optimum increasing over time.
if (self.history.curr_opt_vals[-self.options.monitor_progress_every] * 1.01 >
self.gpb_opt_val):
recent_queries = self.history.query_points[-self.options.monitor_progress_every:, :]
recent_queries_mean = recent_queries.mean(axis=0)
dispersion = np.linalg.norm(recent_queries - recent_queries_mean, ord=2, axis=1)
dispersion = dispersion.mean() / np.sqrt(self.domain_dim)
lower_dispersion = 0.05
upper_dispersion = 0.125
if dispersion < lower_dispersion:
self.monit_kernel_shrink_factor *= self.options.monitor_domain_kernel_shrink
elif dispersion > upper_dispersion:
self.monit_kernel_shrink_factor /= self.options.monitor_domain_kernel_shrink
if not lower_dispersion < dispersion < upper_dispersion:
self.mfgp.domain_kernel.change_smoothness(self.monit_kernel_shrink_factor)
self.mfgp.build_posterior()
self.reporter.writeln('%s--monitor: Kernel shrink set to %0.4f.'%(' '*10,
self.monit_kernel_shrink_factor))
def _monitor_opt_fidel_queries(self):
""" Monitors if we querying at higher fidelities too much or too little. """
# Are we querying at higher fidelities too much or too little.
if self.options.acq in ['mf_gp_ucb', 'mf_gp_ucb_finite']:
of_start_query = max(0, (len(self.history.query_vals) -
2*self.options.monitor_progress_every))
of_recent_query_idxs = range(of_start_query, len(self.history.query_vals))
recent_query_at_opt_fidel = self.history.query_at_opt_fidel[of_recent_query_idxs]
recent_query_at_opt_fidel_mean = recent_query_at_opt_fidel.mean()
if not 0.25 <= recent_query_at_opt_fidel_mean <= 0.75:
if recent_query_at_opt_fidel_mean < 0.25:
self.monit_thresh_coeff *= self.options.monitor_mf_thresh_increase
else:
self.monit_thresh_coeff /= self.options.monitor_mf_thresh_increase
self.reporter.writeln(('%s-- monitor: Changing thresh_coeff to %0.3f, ' +
'recent-query-frac: %0.3f.')%(
' '*10, self.monit_thresh_coeff,
recent_query_at_opt_fidel_mean))
# Methods needed for optimisation -------------------------------------------------
def _terminate_now(self):
""" Returns true if we should terminate now. """
if self.time_step >= self.options.max_iters:
return True
return self.spent_capital >= self.available_capital
def add_capital(self, capital):
""" Adds capital. """
self.available_capital += capital
def _determine_next_query_point(self):
""" Obtains the next query point according to the acquisition. """
# Construction of acquisition function ------
if self.options.acq in ['mf_gp_ucb', 'gp_ucb', 'mf_gp_ucb_finite']:
def _acq_max_obj(x):
""" A wrapper for the mf_gp_ucb acquisition. """
ucb, _ = acquisitions.mf_gp_ucb(self.acq_query_type, x, self.mfgp,
self.mfof.opt_fidel, self.time_step)
return ucb
elif self.options.acq in ['gp_ei', 'mf_sko']:
def _acq_max_obj(x):
""" A wrapper for the gp_ei acquisition. """
return acquisitions.gp_ei(self.acq_query_type, x, self.mfgp, self.mfof.opt_fidel,
self.gpb_opt_val)
else:
raise NotImplementedError('Only implemented %s yet!.'%(self.options.acq))
# Maximise -----
next_pt = self.acq_optimise(_acq_max_obj, self.get_acq_opt_max_evals(self.time_step))
# Store results -----
acq_params = Namespace()
if self.options.acq in ['mf_gp_ucb', 'gp_ucb', 'mf_gp_ucb_finite']:
max_acq_val, beta_th = acquisitions.mf_gp_ucb_single(next_pt, self.mfgp,
self.mfof.opt_fidel, self.time_step)
acq_params.beta_th = beta_th
acq_params.thresh_coeff = self.monit_thresh_coeff
else:
max_acq_val = acquisitions.gp_ei_single(next_pt, self.mfgp, self.mfof.opt_fidel,
self.gpb_opt_val)
acq_params.max_acq_val = max_acq_val
return next_pt, acq_params
def _determine_next_fidel(self, next_pt, acq_params):
""" Determines the next fidelity. """
if self.options.acq in ['mf_gp_ucb', 'mf_gp_ucb_finite']:
next_fidel = fidelity_choosers.mf_gp_ucb(next_pt, self.mfgp, self.mfof, acq_params)
elif self.options.acq in ['mf_sko']:
next_fidel = fidelity_choosers.mf_sko(self.mfof, next_pt, self.mfgp, acq_params)
elif self.options.acq in ['gp_ucb', 'gp_ei']:
next_fidel = deepcopy(self.mfof.opt_fidel)
return next_fidel
@classmethod
def _process_next_fidel_and_pt(cls, next_fidel, next_pt):
""" Processes next point and fidel. Will do certiain things such as perturb it if its
too close to an existing point. """
return next_fidel, next_pt
def _update_capital(self, fidel_pt):
""" Updates the capital according to the cost of the current query. """
if self.options.capital_type == 'given':
pt_cost = self.mfof.cost_single(fidel_pt)
elif self.options.capital_type == 'cputime':
new_cpu_time_stamp = time.clock()
pt_cost = new_cpu_time_stamp - self.cpu_time_stamp
self.cpu_time_stamp = new_cpu_time_stamp
elif self.options.capital_type == 'realtime':
new_real_time_stamp = time.time()
pt_cost = new_real_time_stamp - self.real_time_stamp
self.real_time_stamp = new_real_time_stamp
self.spent_capital += pt_cost
return pt_cost
# The actual querying happens here
def query(self, fidel_pt, domain_pt):
""" The querying happens here. It also calls functions to update history and the
maximum value/ points. But it does *not* update the GP. """
val_pt = self.mfof.eval_single(fidel_pt, domain_pt)
cost_pt = self._update_capital(fidel_pt)
# Update the optimum point
if (np.linalg.norm(fidel_pt - self.mfof.opt_fidel) < 1e-5 and
val_pt > self.gpb_opt_val):
self.gpb_opt_val = val_pt
self.gpb_opt_pt = domain_pt
# Add to history
at_opt_fidel = is_an_opt_fidel_query(fidel_pt, self.mfof.opt_fidel)
self._update_history(fidel_pt, domain_pt, val_pt, cost_pt, at_opt_fidel)
if at_opt_fidel:
self.num_opt_fidel_queries += 1
return val_pt, cost_pt
def _time_keeping(self, reset=0):
""" Used to keep time by _track_time_step. """
curr_keep_time = time.time()
curr_keep_clock = time.clock()
if reset:
self.last_keep_time = curr_keep_time
self.last_keep_clock = curr_keep_clock
else:
time_diff = curr_keep_time - self.last_keep_time
clock_diff = curr_keep_clock - self.last_keep_clock
self.last_keep_time = curr_keep_time
self.last_keep_clock = curr_keep_clock
return round(time_diff, 3), round(clock_diff, 3)
def _track_time_step(self, msg=''):
""" Used to track time step. """
if not self.options.track_every_time_step:
return
if not msg:
self._time_keeping(0)
self.reporter.writeln('')
else:
self.reporter.write('%s: t%s, '%(msg, self._time_keeping()))
# Main optimisation function ------------------------------------------------------
def optimise(self, max_capital):
""" This executes the sequential optimisation routine. """
# Preliminaries
self.add_capital(max_capital)
self.optimise_initialise()
# Main loop --------------------------
while not self._terminate_now():
self.time_step += 1 # increment time
if self.time_step % self.options.build_new_gp_every == 0: # Build GP if needed
self._build_new_gp()
if self.time_step % self.options.monitor_progress_every == 0:
self._monitor_progress()
# Determine next query
self._track_time_step()
next_pt, acq_params = self._determine_next_query_point()
self._track_time_step('#%d, next point'%(self.time_step))
next_fidel = self._determine_next_fidel(next_pt, acq_params)
next_fidel, next_pt = self._process_next_fidel_and_pt(next_fidel, next_pt)
self._track_time_step('next fidel')
next_val, _ = self.query(next_fidel, next_pt)
self._track_time_step('querying')
# update the gp
self._add_data_to_mfgp(next_fidel, next_pt, next_val)
self._track_time_step('gp-update')
if self.time_step % self.options.report_results_every == 0: # report results
self._report_current_results()
return self.gpb_opt_pt, self.gpb_opt_val, self.history
# MFGPBandit Class ends here ========================================================
# APIs for MF GP Bandit optimisation -----------------------------------------------------
# Optimisation from a mf_Func.MFOptFunction object
def mfgpb_from_mfoptfunc(mf_opt_func, max_capital, acq=None, options=None,
                         reporter='default'):
  """ MF GP Bandit optimisation on an mf_func.MFOptFunction object.

  When an acquisition name acq is given, it overrides the acquisition in
  options; if no options were supplied, the defaults are loaded first.
  """
  if acq is not None:
    if options is None:
      # Load default option values so there is something to override.
      reporter = get_reporter(reporter)
      options = load_options(all_mf_gp_bandit_args, reporter=reporter)
    options.acq = acq
  bandit = MFGPBandit(mf_opt_func, options, reporter)
  return bandit.optimise(max_capital)
# Main API
def mfgpb(mf_func, fidel_cost_func, fidel_bounds, domain_bounds, opt_fidel, max_capital,
          acq=None, options=None, reporter=None, vectorised=True, true_opt_pt=None,
          true_opt_val=None):
  # pylint: disable=too-many-arguments
  """ Main API for multi-fidelity GP Bandit (Bayesian) optimisation.
  Arguments:
    - mf_func: the multi-fidelity function to be optimised.
    - fidel_cost_func: gives the query cost at each fidelity.
    - fidel_bounds, domain_bounds: bounds of the fidelity space and the domain.
    - opt_fidel: the fidelity at which mf_func should be optimised.
    - max_capital: total capital (budget) available for optimisation.
    - options: a namespace of additional options.
    - reporter: a reporter object to write outputs.
    - vectorised: if True, mf_func and fidel_cost_func accept matrix inputs;
      otherwise they accept one point at a time.
    - true_opt_pt, true_opt_val: the true optimum point and value when known
      (mostly for experiments on synthetic problems).
  Returns a tuple (gpb_opt_pt, gpb_opt_val, history):
    - gpb_opt_pt, gpb_opt_val: the optimum point found and its value.
    - history: a namespace recording all queries made during optimisation.
  """
  # Wrap everything into an MFOptFunction and hand off to the object API.
  wrapped_mfof = MFOptFunction(mf_func, fidel_cost_func, fidel_bounds, domain_bounds,
                               opt_fidel, vectorised, true_opt_pt, true_opt_val)
  return mfgpb_from_mfoptfunc(wrapped_mfof, max_capital, acq, options, reporter)
| 26,868 | 46.894831 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/unittest_mf_gp.py
|
"""
Unit tests for mf_gp.py
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=superfluous-parens
# pylint: disable=maybe-no-member
# pylint: disable=abstract-class-not-used
from argparse import Namespace
from copy import deepcopy
import numpy as np
# Local
from gp import kernel
from gp.unittest_gp_instances import fit_gp_with_dataset
import mf_gp
from utils.base_test_class import BaseTestClass, execute_tests
from utils.general_utils import compute_average_sq_prediction_error
from utils.option_handler import load_options
# Functions to create data ---------------------------------------------------------------
def _get_mf_gp_options(tune_noise):
  """ Loads the default MF GP options, switching on noise-variance tuning when
      tune_noise is truthy. """
  opts = load_options(mf_gp.all_mf_gp_args)
  if tune_noise:
    opts.noise_var_type = 'tune'
  return opts
def gen_data_from_func(fzx, N, dim_z, dim_x):
  """ Draws N uniform-random (fidelity, domain) pairs and evaluates fzx on
      them. Returns (Z, X, Y). """
  fidels = np.random.random((N, dim_z))
  points = np.random.random((N, dim_x))
  return fidels, points, fzx(fidels, points)
def _gen_datasets_from_func(fzx, N, dim_z, dim_x):
  """ Draws a training set of size N and a test set of size 2N from fzx,
      along with the concatenated (Z, X) input matrices. """
  # Training data is drawn first, then test data (keeps the RNG sequence
  # identical to the original implementation).
  Z_tr, X_tr, Y_tr = gen_data_from_func(fzx, N, dim_z, dim_x)
  Z_te, X_te, Y_te = gen_data_from_func(fzx, 2 * N, dim_z, dim_x)
  ZX_tr = np.concatenate((Z_tr, X_tr), axis=1)
  ZX_te = np.concatenate((Z_te, X_te), axis=1)
  return Z_tr, X_tr, Y_tr, ZX_tr, Z_te, X_te, Y_te, ZX_te
def gen_mf_gp_test_data():
  """ Constructs two train/test dataset namespaces for the MF GP unit tests. """
  # pylint: disable=too-many-locals
  def _pack(N, dim_z, dim_x, fzx, fidel_bw, domain_bw):
    """ Draws the data and bundles it with fresh SE kernels into a Namespace. """
    Z_tr, X_tr, Y_tr, ZX_tr, Z_te, X_te, Y_te, ZX_te = _gen_datasets_from_func(
      fzx, N, dim_z, dim_x)
    return Namespace(Z_tr=Z_tr, X_tr=X_tr, Y_tr=Y_tr, ZX_tr=ZX_tr,
                     Z_te=Z_te, X_te=X_te, Y_te=Y_te, ZX_te=ZX_te,
                     fidel_kernel=kernel.SEKernel(dim=dim_z, scale=1,
                                                  dim_bandwidths=fidel_bw),
                     domain_kernel=kernel.SEKernel(dim=dim_x, scale=1,
                                                   dim_bandwidths=domain_bw),
                     kernel_scale=2, tune_noise=True)
  # dataset 1: additive quadratic in both the fidelity and the domain.
  quad_fzx = lambda z, x: (x**2).sum(axis=1) + (z**2).sum(axis=1)
  dataset_1 = _pack(20, 2, 3, quad_fzx, 0.5, 0.5)
  # dataset 2: a 1-D quintic modulated by an exponential in the fidelity.
  fx = lambda x: -70 * (x + 0.01) * (x - 0.31) * (x + 0.51) * (x - 0.71) * (x - 0.98)
  mod_fzx = lambda z, x: (np.exp((z - 0.8)**2) * fx(x)).sum(axis=1)
  dataset_2 = _pack(100, 1, 1, mod_fzx, 1.0, 0.25)
  # return all datasets
  return [dataset_1, dataset_2]
def get_init_and_post_gp(fidel_dim, domain_dim, num_data, kernel_scale, dim_bw_power=0.5):
  """ Creates a random prior MF GP, samples noisy data from it, conditions a
      copy on that data, and returns everything in a Namespace. """
  # pylint: disable=too-many-locals
  # The np.random calls below are kept in the original order so seeded runs
  # remain reproducible.
  bw_scaling = float(fidel_dim + domain_dim) ** dim_bw_power
  fidel_kernel = kernel.SEKernel(fidel_dim, 1,
                                 (1 + np.random.random(fidel_dim)) * bw_scaling)
  domain_kernel = kernel.SEKernel(domain_dim, 1,
                                  (0.1 + 0.2*np.random.random(domain_dim)) * bw_scaling)
  mean_const = (2 + np.random.random()) * (1 + np.random.random())
  mean_func = lambda x: np.array([mean_const] * len(x))
  noise_var = 0.05 * np.random.random()
  # The prior GP carries no data at all.
  init_gp = mf_gp.get_mfgp_from_fidel_domain(
      np.zeros((0, fidel_dim)), np.zeros((0, domain_dim)), np.zeros((0)),
      kernel_scale, fidel_kernel, domain_kernel, mean_func, noise_var)
  # Sample data from a copy of the prior and condition on it.
  post_gp = deepcopy(init_gp)
  Z_data = np.random.random((num_data, fidel_dim))
  X_data = np.random.random((num_data, domain_dim))
  noiseless_Y = post_gp.draw_mf_samples(1, Z_data, X_data).ravel()
  Y_data = noiseless_Y + np.random.normal(0, np.sqrt(noise_var), noiseless_Y.shape)
  post_gp.add_mf_data(Z_data, X_data, Y_data)
  # Put everything in a namespace
  return Namespace(init_gp=init_gp, post_gp=post_gp, Z_data=Z_data, X_data=X_data,
                   Y_data=Y_data, fidel_kernel=fidel_kernel, domain_kernel=domain_kernel,
                   mean_func=mean_func, noise_var=noise_var, kernel_scale=kernel_scale,
                   fidel_dim=fidel_dim, domain_dim=domain_dim, num_data=num_data)
def gen_mf_gp_instances():
  """ Generates a few MF GP problem instances of varying size for the tests. """
  # pylint: disable=star-args
  # Each tuple is (fidel_dim, domain_dim, num_data, kernel_scale).
  configs = [(1, 1, 5, 1), (2, 4, 15, 2), (5, 10, 100, 2), (3, 20, 200, 4)]
  return [get_init_and_post_gp(*cfg) for cfg in configs]
# Functions to build GPs -----------------------------------------------------------------
def build_mfgp_with_dataset(dataset):
  """ Constructs an MF GP for the dataset with reasonable fixed hyper-parameters:
      a constant median prior mean and 1/20th of the label variance as noise. """
  noise_var = (dataset.Y_tr.std()**2)/20
  mean_func = lambda x: np.array([np.median(dataset.Y_tr)] * len(x))
  return mf_gp.get_mfgp_from_fidel_domain(
      dataset.Z_tr, dataset.X_tr, dataset.Y_tr, dataset.kernel_scale,
      dataset.fidel_kernel, dataset.domain_kernel, mean_func, noise_var)
def fit_simple_gp_with_dataset(dataset):
  """ Fits an ordinary single-fidelity GP on the joined (Z, X) inputs. """
  joint_data = [dataset.ZX_tr, dataset.Y_tr]
  return fit_gp_with_dataset(joint_data)
def fit_mfgp_with_dataset(dataset):
  """ Fits an MF GP to the dataset via marginal-likelihood hyper-parameter
      optimisation, using a median prior mean. """
  fit_options = _get_mf_gp_options(dataset.tune_noise)
  fit_options.mean_func_type = 'median'
  fitter = mf_gp.MFGPFitter(dataset.Z_tr, dataset.X_tr, dataset.Y_tr,
                            options=fit_options)
  fitted_gp, _ = fitter.fit_gp()
  return fitted_gp
# Test cases -----------------------------------------------------------------------------
class MFGPTestCase(BaseTestClass):
  """ Unit tests for the MF GP: prediction consistency, predictive accuracy and
      marginal-likelihood evaluation. """
  # pylint: disable=too-many-locals
  def setUp(self):
    """ Set up for the tests. """
    self.datasets = gen_mf_gp_test_data()
  def test_eval_at_fidel(self):
    """ Tests that eval_at_fidel(Z, X) agrees with eval on the concatenated
        [Z, X] inputs. """
    self.report('MFGP.eval_at_fidel vs GP.eval.')
    ds = self.datasets[0]
    curr_gp = build_mfgp_with_dataset(ds)
    curr_pred, curr_std = curr_gp.eval_at_fidel(ds.Z_te, ds.X_te, uncert_form='std')
    alt_pred, alt_std = curr_gp.eval(np.concatenate((ds.Z_te, ds.X_te), axis=1),
                                     uncert_form='std')
    assert np.linalg.norm(curr_pred - alt_pred) < 1e-5
    assert np.linalg.norm(curr_std - alt_std) < 1e-5
  def test_eval(self):
    """ Tests the evaluation: the GP should beat a constant (train-mean)
        predictor on most datasets. """
    self.report('MFGP.eval_at_fidel: Probabilistic test, might fail sometimes.')
    num_successes = 0
    for ds in self.datasets:
      curr_gp = build_mfgp_with_dataset(ds)
      curr_pred, _ = curr_gp.eval_at_fidel(ds.Z_te, ds.X_te)
      curr_err = compute_average_sq_prediction_error(ds.Y_te, curr_pred)
      const_err = compute_average_sq_prediction_error(ds.Y_te, ds.Y_tr.mean())
      success = curr_err < const_err
      self.report(('(N,DZ,DX)=' + str(ds.Z_tr.shape + (ds.X_tr.shape[1],)) +
                   ':: MFGP-err= ' + str(curr_err) + ', Const-err= ' + str(const_err) +
                   ', success=' + str(success)), 'test_result')
      num_successes += int(success)
    # Probabilistic: require a win on more than 60% of the datasets.
    assert num_successes > 0.6 *len(self.datasets)
  def test_compute_log_marginal_likelihood(self):
    """ Tests compute_log_marginal_likelihood. Does not test for accurate implementation.
        Only tests if the function runs without runtime errors. """
    self.report('MFGP.compute_log_marginal_likelihood: ** Runtime test errors only **')
    for ds in self.datasets:
      curr_gp = build_mfgp_with_dataset(ds)
      lml = curr_gp.compute_log_marginal_likelihood()
      self.report(('(N,DZ,DX)=' + str(ds.Z_tr.shape + (ds.X_tr.shape[1],)) +
                   ':: MFGP-lml= ' + str(lml)), 'test_result')
class MFGPFitterTestCase(BaseTestClass):
  """ Unit tests for the MFGPFitter class: hyper-parameter set-up, fitted
      marginal likelihood, predictive accuracy and posterior sampling. """
  # pylint: disable=too-many-locals
  def setUp(self):
    """ Set up for the tests. """
    self.datasets = gen_mf_gp_test_data()
  def test_set_up(self):
    """ Tests if the fitter's hyper-parameter bounds have been set up properly. """
    for ds in self.datasets:
      options = _get_mf_gp_options(ds.tune_noise)
      fitter = mf_gp.MFGPFitter(ds.Z_tr, ds.X_tr, ds.Y_tr, options)
      # The number of hyperparameters should be 1 for the kernel, the total dimensionality
      # of the fidelity space and domain plus one more if we are tuning options.
      num_hps = 1 + ds.Z_tr.shape[1] + ds.X_tr.shape[1] + ds.tune_noise
      constructed_hp_bounds = ([fitter.scale_log_bounds] +
                               fitter.fidel_bandwidth_log_bounds +
                               fitter.domain_bandwidth_log_bounds)
      if ds.tune_noise:
        constructed_hp_bounds = [fitter.noise_var_log_bounds] + constructed_hp_bounds
      constructed_hp_bounds = np.array(constructed_hp_bounds)
      assert fitter.hp_bounds.shape == (num_hps, 2)
      assert np.linalg.norm(constructed_hp_bounds - fitter.hp_bounds) < 1e-5
  def test_marginal_likelihood(self):
    """ Test for marginal likelihood: the fitted GP should not be worse than a
        naively-parameterised GP. """
    self.report(('Marginal Likelihood for fitted MFGP. Probabilistic test, might fail.' +
                 ' The domain bandwidth should be smaller than the fidelity bandwidth ' +
                 'for the second dataset.'))
    num_successes = 0
    for ds in self.datasets:
      naive_gp = build_mfgp_with_dataset(ds)
      fitted_gp = fit_mfgp_with_dataset(ds)
      naive_lml = naive_gp.compute_log_marginal_likelihood()
      fitted_lml = fitted_gp.compute_log_marginal_likelihood()
      success = naive_lml <= fitted_lml
      self.report('(N,DZ,DX)= %s, naive-lml=%0.4f, fitted-lml=%0.4f, succ=%d'%(
          str(ds.Z_tr.shape + (ds.X_tr.shape[1],)), naive_lml, fitted_lml, success),
          'test_result')
      self.report('  Naive GP :: %s'%(str(naive_gp)), 'test_result')
      self.report('  Fitted GP:: %s'%(str(fitted_gp)), 'test_result')
      num_successes += success
    # Probabilistic: require a win on more than 60% of the datasets.
    assert num_successes > 0.6 * len(self.datasets)
  def test_eval(self):
    """ Test for prediction: a fitted MF GP should match a fitted simple GP on
        the concatenated inputs to within a small error margin. """
    self.report('Prediction for fitted Simple vs MF GPs. Probabilistic test, might fail.')
    num_successes = 0
    for ds in self.datasets:
      simple_gp = fit_simple_gp_with_dataset(ds)
      simple_preds, _ = simple_gp.eval(ds.ZX_te)
      simple_err = compute_average_sq_prediction_error(ds.Y_te, simple_preds)
      fitted_gp = fit_mfgp_with_dataset(ds)
      fitted_preds, _ = fitted_gp.eval_at_fidel(ds.Z_te, ds.X_te)
      fitted_err = compute_average_sq_prediction_error(ds.Y_te, fitted_preds)
      success = abs(fitted_err - simple_err) < 1e-2
      self.report('(N,DZ,DX)= %s, simple-err=%0.4f, mfgp-err=%0.4f, succ=%d'%(
          str(ds.Z_tr.shape + (ds.X_tr.shape[1],)), simple_err, fitted_err, success),
          'test_result')
      num_successes += success
    assert num_successes > 0.6 * len(self.datasets)
  def test_draw_samples(self):
    """ Test for drawing samples: posterior samples should mostly fall inside
        the GP's own 95% credible interval. """
    self.report('Test for drawing samples. Probabilistic test, might fail.')
    total_coverage = 0
    num_test_pts = 100
    num_samples = 5 # Draw 5 samples at each point - just for testing.
    mfgp_instances = gen_mf_gp_instances()
    for inst in mfgp_instances:
      Z_test = np.random.random((num_test_pts, inst.fidel_dim))
      X_test = np.random.random((num_test_pts, inst.domain_dim))
      F_test = inst.post_gp.draw_mf_samples(num_samples, Z_test, X_test)
      post_mean, post_std = inst.post_gp.eval_at_fidel(Z_test, X_test, uncert_form='std')
      conf_band_width = 1.96  # two-sided 95% normal quantile
      ucb = post_mean + conf_band_width * post_std
      lcb = post_mean - conf_band_width * post_std
      below_ucb = F_test <= ucb
      above_lcb = F_test >= lcb
      coverage = (below_ucb * above_lcb).mean()
      total_coverage += coverage
      self.report(('(n, DZ, DX) = (%d, %d, %d)::: Coverage for 0.95 credible interval: ' +
                   '%0.4f')%(inst.num_data, inst.fidel_dim, inst.domain_dim, coverage),
                  'test_result')
    avg_coverage = total_coverage / len(mfgp_instances)
    avg_coverage_is_good = avg_coverage > 0.9
    self.report('Avg coverage (%0.3f) is larger than 0.9? %d'%(avg_coverage,
                avg_coverage_is_good), 'test_result')
    assert avg_coverage_is_good
# Run all test cases in this module when invoked directly.
if __name__ == '__main__':
  execute_tests()
| 12,820 | 43.985965 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/mfopt_experimenters.py
|
"""
Harness for conducting experiments for MF Optimisation.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
from argparse import Namespace
from datetime import datetime
import numpy as np
import os
# Local imports
from mf_gp_bandit import mfgpb_from_mfoptfunc
from mf_func import NoisyMFOptFunction
from utils.experimenters import BasicExperimenter
from utils.optimisers import direct_maximise_from_mfof
from utils.reporters import get_reporter
# scratch
from scratch.get_finite_fidel_mfof import get_finite_mfof_from_mfof
class MFOptExperimenter(BasicExperimenter):
  """ Harness for running multi-fidelity optimisation experiments.

  Runs every method in `methods` once per experiment iteration on the given
  MFOptFunction and accumulates the query/optimisation histories so that the
  BasicExperimenter machinery can save them out.
  """
  # pylint: disable=attribute-defined-outside-init

  def __init__(self, experiment_name, mfof, max_capital, methods, num_experiments,
               save_dir, save_file_prefix='', method_options=None, method_reporter=None,
               *args, **kwargs):
    """ Constructor for MFOptExperiment. See BasicExperimenter for more args.
        mfof: A MFOptFunction Object.
        max_capital: total capital (cost budget) given to each method.
        methods: are the methods we will use for MF optimisation.
        method_options: a dictionary mapping each method name to its options
          (or None to use defaults for every method).
        method_reporter: reporter passed down to each optimisation method.
    """
    save_file_name = self._get_save_file_name(save_dir, experiment_name, save_file_prefix)
    super(MFOptExperimenter, self).__init__(experiment_name, num_experiments,
                                            save_file_name, *args, **kwargs)
    self.mfof = mfof
    self.max_capital = max_capital
    self.methods = methods
    self.num_methods = len(self.methods)
    # Bug fix: the original fallback was `{key: None for key in method_options}`,
    # which raises a TypeError when method_options is None (iterating None).
    # Default each *method* to None options instead.
    self.method_options = (method_options if method_options else
                           {key: None for key in methods})
    self.method_reporter = get_reporter(method_reporter)
    # Noisy problems additionally track the noiseless optimisation curve.
    self.noisy_observations = isinstance(mfof, NoisyMFOptFunction)
    self._set_up_saving()  # Set up for saving results.

  @classmethod
  def _get_save_file_name(cls, save_dir, experiment_name, save_file_prefix):
    """ Gets the save file name: <prefix>-<MMDD-HHMMSS>.mat inside save_dir. """
    save_file_prefix = save_file_prefix if save_file_prefix else experiment_name
    save_file_name = '%s-%s.mat'%(save_file_prefix,
                                  datetime.now().strftime('%m%d-%H%M%S'))
    save_file_name = os.path.join(save_dir, save_file_name)
    return save_file_name

  def _set_up_saving(self):
    """ Runs some routines to set up saving of results to the .mat file. """
    # Store methods and the options in to_be_saved
    self.to_be_saved.methods = self.methods
    self.to_be_saved.method_options = self.method_options
    # Data about the problem. Fall back to -inf / a zero vector when the true
    # optimum is unknown, since .mat files cannot store None.
    self.to_be_saved.true_opt_val = (self.mfof.opt_val if self.mfof.opt_val is not None
                                     else -np.inf)
    self.to_be_saved.true_opt_pt = (self.mfof.opt_pt if self.mfof.opt_pt is not None
                                    else np.zeros((1)))
    self.to_be_saved.opt_fidel = self.mfof.opt_fidel
    self.to_be_saved.opt_fidel_unnormalised = self.mfof.opt_fidel_unnormalised
    self.to_be_saved.fidel_dim = self.mfof.fidel_dim
    self.to_be_saved.domain_dim = self.mfof.domain_dim
    self.fidel_dim = self.mfof.fidel_dim
    self.domain_dim = self.mfof.domain_dim
    # Per-method result fields. data_to_be_extracted are produced by the
    # optimisers directly; true_curr_opt_vals is derived afterwards.
    self.data_to_be_extracted = ['query_fidels', 'query_points', 'query_vals',
                                 'query_costs', 'curr_opt_vals', 'query_at_opt_fidel']
    self.data_to_be_saved = self.data_to_be_extracted + ['true_curr_opt_vals']
    for data_type in self.data_to_be_saved:
      setattr(self.to_be_saved, data_type, self._get_new_empty_results_array())

  def _get_new_empty_results_array(self):
    """ Returns a new empty (num_methods x 0) object array for storing results. """
    # dtype=object (the np.object alias was removed in NumPy 1.24) lets each cell
    # hold an arbitrarily-shaped history array.
    return np.empty((self.num_methods, 0), dtype=object)

  def _get_new_iter_results_array(self):
    """ Returns a new empty (num_methods x 1) object array for the current iteration. """
    return np.empty((self.num_methods, 1), dtype=object)

  def _print_method_header(self, method):
    """ Prints a header announcing the current method. """
    experiment_header = '--Exp %d/%d. Method: %s with capital %0.4f'%(
        self.experiment_iter, self.num_experiments, method, self.max_capital)
    experiment_header = '\n' + experiment_header + '\n' + '-' * len(experiment_header)
    self.reporter.writeln(experiment_header)

  def _print_method_result(self, method, comp_opt_val, num_opt_fidel_evals):
    """ Prints the final result achieved by this method. """
    result_str = 'Method: %s achieved max-val %0.5f in %d opt-fidel queries.'%(method,
                 comp_opt_val, num_opt_fidel_evals)
    self.reporter.writeln(result_str)

  def run_experiment_iteration(self):
    """ Runs each method in self.methods once and stores the results to to_be_saved."""
    # pylint: disable=too-many-branches
    curr_iter_results = Namespace()
    for data_type in self.data_to_be_saved:
      setattr(curr_iter_results, data_type, self._get_new_iter_results_array())
    # We will go through each method in this loop ----------------------------------
    for meth_iter in range(self.num_methods):
      method = self.methods[meth_iter]
      self._print_method_header(method)
      # Finite-fidelity methods operate on a restricted copy of the problem.
      if method in ['mf_gp_ucb_finite', 'mf_sko']:
        curr_mfof = get_finite_mfof_from_mfof(self.mfof,
                      self.method_options[method].finite_fidels,
                      self.method_options[method].finite_fidels_is_normalised)
      else:
        curr_mfof = self.mfof
      # Run the method.
      if method in ['gp_ucb', 'gp_ei', 'mf_gp_ucb', 'mf_gp_ucb_finite', 'mf_sko']:
        _, _, opt_hist = mfgpb_from_mfoptfunc(curr_mfof, self.max_capital,
                                              acq=method,
                                              options=self.method_options[method],
                                              reporter=self.method_reporter)
        if method in ['gp_ucb', 'gp_ei']:
          # save some parameters for DiRect because I can't control each evaluation in
          # the fortran library
          direct_num_evals = len(opt_hist.query_fidels)
          direct_av_cost = opt_hist.query_costs.mean()
      elif method == 'direct':
        # NOTE(review): 'direct' reuses direct_num_evals/direct_av_cost set by a
        # preceding 'gp_ucb'/'gp_ei' run — it assumes one of those appears earlier
        # in self.methods; otherwise this raises NameError. Confirm intended usage.
        # As this is deterministic, just run it once.
        if self.experiment_iter == 1:
          _, _, opt_hist = direct_maximise_from_mfof(self.mfof, direct_num_evals)
          num_actual_direct_evals = len(opt_hist.curr_opt_vals)
          # Pad the history with synthetic query data at the optimal fidelity.
          opt_hist.query_fidels = np.repeat(self.mfof.opt_fidel.reshape(1, -1),
                                            num_actual_direct_evals, axis=0)
          opt_hist.query_points = np.zeros((num_actual_direct_evals, self.mfof.fidel_dim))
          opt_hist.query_vals = np.zeros((num_actual_direct_evals))
          opt_hist.query_costs = direct_av_cost * np.ones((num_actual_direct_evals))
          opt_hist.query_at_opt_fidel = np.ones((num_actual_direct_evals), dtype=bool)
        else:
          # Reuse the results from the first iteration.
          self.reporter.writeln('Not running %s this iteration as it is deterministic.'%(
                                method))
          opt_hist = Namespace()
          for data_type in self.data_to_be_extracted:
            data_pointer = getattr(self.to_be_saved, data_type)
            setattr(opt_hist, data_type, data_pointer[meth_iter, 0])
      else:
        raise ValueError('Unknown method %s!'%(method))
      # Save noiseless function values results: track the running best over the
      # noiseless evaluations at the optimal fidelity.
      if self.noisy_observations:
        num_evals = len(opt_hist.curr_opt_vals)
        curr_best = -np.inf
        opt_hist.true_curr_opt_vals = np.zeros((num_evals))
        for i in range(num_evals):
          if opt_hist.query_at_opt_fidel[i]:
            curr_value = self.mfof.eval_single_noiseless(self.mfof.opt_fidel,
                                                         opt_hist.query_points[i])
            if curr_value > curr_best:
              curr_best = curr_value
          opt_hist.true_curr_opt_vals[i] = curr_best
      else:
        opt_hist.true_curr_opt_vals = opt_hist.curr_opt_vals
      # Save the results of this method into the current-iteration arrays.
      for data_type in self.data_to_be_saved:
        data = getattr(opt_hist, data_type)
        data_pointer = getattr(curr_iter_results, data_type)
        data_pointer[meth_iter, 0] = data
      # Print out the results
      comp_opt_val = opt_hist.true_curr_opt_vals[-1]
      self._print_method_result(method, comp_opt_val, opt_hist.query_at_opt_fidel.sum())
    # for meth_iter ends here -------------------------------------------------------
    # Now append this experiment's column of results to to_be_saved.
    for data_type in self.data_to_be_saved:
      data = getattr(curr_iter_results, data_type)
      curr_data_to_be_saved = getattr(self.to_be_saved, data_type)
      updated_data_to_be_saved = np.append(curr_data_to_be_saved, data, axis=1)
      setattr(self.to_be_saved, data_type, updated_data_to_be_saved)

  def get_iteration_header(self):
    """ Returns a one-line description of the problem for the iteration header. """
    noisy_str = ('Noiseless' if not self.noisy_observations else
                 'noisy (var=%0.4f)'%(self.mfof.noise_var))
    opt_val_str = '?' if self.mfof.opt_val is None else '%0.4f'%(self.mfof.opt_val)
    ret = '%s(p=%d,d=%d), max=%s, max-capital %0.3f, %s'%(self.experiment_name,
          self.mfof.fidel_dim, self.mfof.domain_dim, opt_val_str,
          self.max_capital, noisy_str)
    return ret
| 9,375 | 46.593909 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/demo_mfgp_gp_comp.py
|
"""
A simple demo for mf gps. We will use the same data and see if the GPs learned are
the same.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=superfluous-parens
import numpy as np
# Local
from gp import gp_instances
import mf_gp
from utils.reporters import BasicReporter
from utils.option_handler import load_options
from utils.general_utils import compute_average_sq_prediction_error
from unittest_mf_gp import gen_data_from_func
def get_data():
  """ Builds train and test sets for the demo from a simple quadratic in (z, x). """
  def quadratic(z, x):
    # f(z, x) = ||z||^2 + ||x||^2, row-wise over 2D inputs.
    return (z**2).sum(axis=1) + (x**2).sum(axis=1)
  num_pts = 100
  fidel_dim = 1
  dom_dim = 1
  Z_tr, X_tr, Y_tr = gen_data_from_func(quadratic, num_pts, fidel_dim, dom_dim)
  Z_te, X_te, Y_te = gen_data_from_func(quadratic, num_pts, fidel_dim, dom_dim)
  # The simple GP consumes the fidelity and domain coordinates concatenated.
  ZX_tr = np.concatenate((Z_tr, X_tr), axis=1)
  ZX_te = np.concatenate((Z_te, X_te), axis=1)
  return Z_tr, X_tr, Y_tr, ZX_tr, Z_te, X_te, Y_te, ZX_te
def _print_str_results(reporter, descr, sgp_result, mfgp_result):
""" Prints the result out. """
print_str = '%s:: S-GP: %s, MF-GP: %s'%(descr, sgp_result, mfgp_result)
reporter.writeln(print_str)
def _print_float_results(reporter, descr, sgp_result, mfgp_result):
  """ Formats both float results to 4 decimal places and prints them on one line. """
  formatted_sgp = '%0.4f'%(sgp_result)
  formatted_mfgp = '%0.4f'%(mfgp_result)
  _print_str_results(reporter, descr, formatted_sgp, formatted_mfgp)
def main():
  """ Main function: fits a simple GP on concatenated (z, x) data and a
      multi-fidelity GP on the same data, then compares their fits. """
  # pylint: disable=too-many-locals
  # pylint: disable=maybe-no-member
  np.random.seed(0)  # fixed seed so the demo is reproducible
  reporter = BasicReporter()
  Z_tr, X_tr, Y_tr, ZX_tr, Z_te, X_te, Y_te, ZX_te = get_data()
  sgp_options = load_options(gp_instances.all_simple_gp_args, 'GP', reporter=reporter)
  mfgp_options = load_options(mf_gp.all_mf_gp_args, 'MFGP', reporter=reporter)
  mfgp_options.mean_func_type = 'median'
  # Fit the GPs.
  sgp_fitter = gp_instances.SimpleGPFitter(ZX_tr, Y_tr, sgp_options, reporter=reporter)
  sgp, opt_s = sgp_fitter.fit_gp()
  mfgp_fitter = mf_gp.MFGPFitter(Z_tr, X_tr, Y_tr, mfgp_options, reporter=reporter)
  mfgp, opt_mf = mfgp_fitter.fit_gp()
  # Round the optimised hyper-parameters and their bounds for readable printing.
  opt_s = (np.array(opt_s).round(4))
  opt_mf = (np.array(opt_mf).round(4))
  s_bounds = sgp_fitter.hp_bounds.round(3)
  mf_bounds = mfgp_fitter.hp_bounds.round(3)
  # Print out some fitting statistics
  _print_str_results(reporter, 'Opt-pts', str(opt_s), str(opt_mf))
  _print_str_results(reporter, 'Opt-bounds', str(s_bounds), str(mf_bounds))
  # The marginal likelihoods
  sgp_lml = sgp.compute_log_marginal_likelihood()
  mfgp_lml = mfgp.compute_log_marginal_likelihood()
  _print_float_results(reporter, 'Log_Marg_Like', sgp_lml, mfgp_lml)
  # Train errors: mean squared prediction error on the training set.
  s_pred, _ = sgp.eval(ZX_tr)
  mf_pred, _ = mfgp.eval_at_fidel(Z_tr, X_tr)
  sgp_tr_err = compute_average_sq_prediction_error(Y_tr, s_pred)
  mfgp_tr_err = compute_average_sq_prediction_error(Y_tr, mf_pred)
  _print_float_results(reporter, 'Train Error', sgp_tr_err, mfgp_tr_err)
  # Test errors: same metric on held-out data.
  s_pred, _ = sgp.eval(ZX_te)
  mf_pred, _ = mfgp.eval_at_fidel(Z_te, X_te)
  sgp_te_err = compute_average_sq_prediction_error(Y_te, s_pred)
  mfgp_te_err = compute_average_sq_prediction_error(Y_te, mf_pred)
  _print_float_results(reporter, 'Test Error', sgp_te_err, mfgp_te_err)


if __name__ == '__main__':
  main()
| 3,305 | 35.733333 | 87 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/mf_func.py
|
"""
A collection of functions for managing multi-fidelity functions.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
import numpy as np
# Local imports
from utils.general_utils import map_to_cube, map_to_bounds
class MFFunction(object):
  """ This just creates a wrapper to call the function by appropriately creating bounds
      and querying appropriately. Supports evaluation in both the original
      coordinates and normalised ([0, 1]^d cube) coordinates. """

  def __init__(self, mf_func, fidel_cost_func, fidel_bounds, domain_bounds,
               vectorised=True):
    """ Constructor.
        mf_func: takes two arguments mf_func(z, x) where z is the fidelity and x is
          the point in the domain.
        fidel_cost_func: fidel_cost_func(z) gives the cost of evaluating at z.
        fidel_bounds, domain_bounds: are the bounds of the fidelity spaces, domains
          resp.
        vectorised: If True it means mf_func and fidel_cost_func can take
          multiple inputs and produce multiple outputs. If False, the functions
          can take only single inputs in 'column' form.
    """
    self.mf_func = mf_func
    self.fidel_cost_func = fidel_cost_func
    self.fidel_bounds = np.array(fidel_bounds)
    self.domain_bounds = np.array(domain_bounds)
    # Dimensionalities are implied by the number of bound pairs.
    self.fidel_dim = len(fidel_bounds)
    self.domain_dim = len(domain_bounds)
    self.vectorised = vectorised

  # Wrappers for evaluating the function -------------------------------------------------
  def eval_at_fidel_single_point(self, Z, X):
    """ Evaluates X at the given Z at a single point. Returns a float. """
    if not self.vectorised:
      return float(self.mf_func(Z, X))
    else:
      # A vectorised function expects 2D arrays; present the point as a 1-row matrix.
      Z = np.array(Z).reshape((1, self.fidel_dim))
      X = np.array(X).reshape((1, self.domain_dim))
      return float(self.mf_func(Z, X))

  def eval_at_fidel_multiple_points(self, Z, X):
    """ Evaluates X at the given Z at multiple points. Returns a 1D array. """
    if self.vectorised:
      return self.mf_func(Z, X).ravel()
    else:
      # Non-vectorised function: evaluate one row at a time.
      ret = []
      for i in range(len(Z)):
        ret.append(self.eval_at_fidel_single_point(Z[i, :], X[i, :]))
      return np.array(ret)

  # Wrappers for evaluating the cost function --------------------------------------------
  def eval_fidel_cost_single_point(self, Z):
    """ Evaluates the cost function at a single fidelity. Returns a float. """
    if not self.vectorised:
      return float(self.fidel_cost_func(Z))
    else:
      Z = np.array(Z).reshape((1, self.fidel_dim))
      return float(self.fidel_cost_func(Z))

  def eval_fidel_cost_multiple_points(self, Z):
    """ Evaluates the cost function at multiple fidelities. Returns a 1D array. """
    if self.vectorised:
      return self.fidel_cost_func(Z).ravel()
    else:
      ret = []
      for i in range(len(Z)):
        ret.append(self.eval_fidel_cost_single_point(Z[i, :]))
      return np.array(ret)

  # Wrappers for evaluating at normalised points -----------------------------------------
  def eval_at_fidel_single_point_normalised(self, Z, X):
    """ Evaluates X at the given Z at a single point using normalised coordinates. """
    Z, X = self.get_unnormalised_coords(Z, X)
    return self.eval_at_fidel_single_point(Z, X)

  def eval_at_fidel_multiple_points_normalised(self, Z, X):
    """ Evaluates X at the given Z at multiple points using normalised coordinates. """
    Z, X = self.get_unnormalised_coords(Z, X)
    return self.eval_at_fidel_multiple_points(Z, X)

  def eval_fidel_cost_single_point_normalised(self, Z):
    """ Evaluates the cost function at a single point using normalised coordinates. """
    Z, _ = self.get_unnormalised_coords(Z, None)
    return self.eval_fidel_cost_single_point(Z)

  def eval_fidel_cost_multiple_points_normalised(self, Z):
    """ Evaluates the cost function at multiple points using normalised coordinates. """
    Z, _ = self.get_unnormalised_coords(Z, None)
    return self.eval_fidel_cost_multiple_points(Z)

  # Maps to normalised coordinates and vice versa ----------------------------------------
  def get_normalised_coords(self, Z, X):
    """ Maps points in the original space to the unit cube. Either argument may be
        None, in which case None is returned in its place. """
    ret_Z = None if Z is None else map_to_cube(Z, self.fidel_bounds)
    ret_X = None if X is None else map_to_cube(X, self.domain_bounds)
    return ret_Z, ret_X

  def get_unnormalised_coords(self, Z, X):
    """ Maps points in the unit cube back to the original space. Either argument may
        be None, in which case None is returned in its place. """
    ret_Z = None if Z is None else map_to_bounds(Z, self.fidel_bounds)
    ret_X = None if X is None else map_to_bounds(X, self.domain_bounds)
    return ret_Z, ret_X
# MFFunction ends here ===================================================================
class MFOptFunction(MFFunction):
  """ A class which we will use for MF Optimisation. Works internally in
      normalised ([0, 1]^d) fidelity/domain coordinates. """

  def __init__(self, mf_func, fidel_cost_func, fidel_bounds, domain_bounds,
               opt_fidel_unnormalised, vectorised=True, opt_pt=None, opt_val=None):
    """ Constructor.
        mf_func: takes two arguments mf_func(z, x) where z is the fidelity and x is
          the point in the domain.
        fidel_cost_func: fidel_cost_func(z) gives the cost of evaluating at z.
        fidel_bounds, domain_bounds: are the bounds of the fidelity spaces, domains
          resp.
        opt_fidel_unnormalised: The point in the fidelity space at which we want to
          optimise, in unnormalised coordinates.
        vectorised: If True it means mf_func and fidel_cost_func can take
          multiple inputs and produce multiple outputs. If False, the functions
          can take only single inputs in 'column' form.
        opt_pt, opt_val: The optimum point and value in the domain (if known).
    """
    super(MFOptFunction, self).__init__(mf_func, fidel_cost_func, fidel_bounds,
                                        domain_bounds, vectorised)
    self.opt_fidel_unnormalised = np.array(opt_fidel_unnormalised).ravel()
    # Internally all fidelities are handled in normalised coordinates.
    self.opt_fidel, _ = self.get_normalised_coords(opt_fidel_unnormalised, None)
    if len(self.opt_fidel) != self.fidel_dim:
      raise ValueError('opt_fidel should be a %d-vector.'%(self.fidel_dim))
    self.opt_fidel_cost = self.cost_single(self.opt_fidel)
    # Set the optimisation point.
    self.opt_pt = opt_pt
    self.opt_val = opt_val
    self.mfgp = None  # we will need this later on.
    self.finite_fidels = None
    self.is_finite = False

  # Evaluation ---------------------------------------------------------------------------
  def eval_single(self, Z, X):
    """ Evaluate at a single point (normalised coordinates). """
    return self.eval_at_fidel_single_point_normalised(Z, X)

  def eval_multiple(self, Z, X):
    """ Evaluate at multiple points (normalised coordinates). """
    return self.eval_at_fidel_multiple_points_normalised(Z, X)

  def eval(self, Z, X):
    """ Executes either eval_single or eval_multiple depending on Z's shape. """
    if len(Z.shape) == 1:
      return self.eval_single(Z, X)
    elif len(Z.shape) == 2:
      return self.eval_multiple(Z, X)
    else:
      raise ValueError('Z should be either a vector or matrix.')

  # Cost ---------------------------------------------------------------------------------
  def cost_single(self, Z):
    """ Evaluates cost at a single fidelity (normalised coordinates). """
    return self.eval_fidel_cost_single_point_normalised(Z)

  def cost_multiple(self, Z):
    """ Evaluates cost at multiple fidelities (normalised coordinates). """
    return self.eval_fidel_cost_multiple_points_normalised(Z)

  def cost(self, Z):
    """ Executes either cost_single or cost_multiple depending on Z's shape. """
    if len(Z.shape) == 1:
      return self.cost_single(Z)
    elif len(Z.shape) == 2:
      return self.cost_multiple(Z)
    else:
      raise ValueError('Z should be either a vector or matrix.')

  # Other --------------------------------------------------------------------------------
  def get_cost_ratio(self, Z1, Z2=None):
    """ Obtains the ratio cost(Z1)/cost(Z2); if Z2 is None the cost at the optimal
        fidelity is used as the denominator. """
    if Z2 is None:
      cost_Z2 = self.opt_fidel_cost
    else:
      cost_Z2 = self.cost(Z2)
    return self.cost(Z1)/cost_Z2

  def get_candidate_fidelities(self, filter_by_cost=True):
    """ Gets candidate fidelities. If filter_by_cost is True then it doesn't return
        those whose cost is larger than opt_cost_fidel. """
    if self.is_finite:
      return self.get_candidate_fidelities_finite()
    # Candidate generation depends on the fidelity dimension: dense grids for low
    # dimensions, random samples (plus a coarse grid in 3D) otherwise.
    if self.fidel_dim == 1:
      candidates = np.linspace(0, 1, 200).reshape((-1, 1))
    elif self.fidel_dim == 2:
      num_per_dim = 25
      candidates = (np.indices((num_per_dim, num_per_dim)).reshape(2, -1).T + 0.5) / \
                   float(num_per_dim)
    elif self.fidel_dim == 3:
      num_per_dim = 10
      cand_1 = (np.indices((num_per_dim, num_per_dim, num_per_dim)).reshape(3, -1).T
                + 0.5) / float(num_per_dim)
      cand_2 = np.random.random((1000, self.fidel_dim))
      candidates = np.vstack((cand_1, cand_2))
    else:
      candidates = np.random.random((4000, self.fidel_dim))
    # To filter by cost?
    if filter_by_cost:
      fidel_costs = self.cost_multiple(candidates)
      filtered_idxs = fidel_costs < self.opt_fidel_cost
      candidates = candidates[filtered_idxs, :]
    # Finally add the highest fidelity.
    candidates = np.vstack((self.opt_fidel.reshape((1, self.fidel_dim)), candidates))
    return candidates

  def set_finite_fidels(self, finite_fidels_raw, is_normalised):
    """ Sets the finite fidelities; subsequent candidate queries are restricted to
        them. is_normalised says whether finite_fidels_raw is already normalised. """
    self.is_finite = True
    if is_normalised:
      self.finite_fidels = finite_fidels_raw
    else:
      self.finite_fidels_unnormalised = finite_fidels_raw
      self.finite_fidels, _ = self.get_normalised_coords(finite_fidels_raw, None)

  def get_candidate_fidelities_finite(self):
    """ Gets candidate fidelities from the finite set (repeated, shuffled and capped),
        always including the optimal fidelity first. """
    candidates = np.repeat(self.finite_fidels, 100, axis=0)
    np.random.shuffle(candidates)
    # NOTE(review): this drops the first shuffled row and caps at 499 candidates;
    # possibly `[:500, :]` was intended — confirm.
    candidates = candidates[1:500, :]
    candidates = np.vstack((self.opt_fidel.reshape((1, self.fidel_dim)), candidates))
    return candidates
# MFOptFunction ends here ================================================================
class NoisyMFOptFunction(MFOptFunction):
  """ Child class of MFOptFunction which also adds noise to the evaluations. """

  def __init__(self, mf_func, fidel_cost_func, fidel_bounds, domain_bounds,
               opt_fidel_unnormalised, noise_var, noise_type='gauss',
               *args, **kwargs):
    """ Constructor. See MFOptFunction and MFFunction for other args.
        noise_var: variance of the observation noise.
        noise_type: only 'gauss' (zero-mean Gaussian) is implemented.
    """
    super(NoisyMFOptFunction, self).__init__(mf_func, fidel_cost_func, fidel_bounds,
      domain_bounds, opt_fidel_unnormalised, *args, **kwargs)
    self.noise_var = noise_var
    self.noise_type = noise_type

  # Noise functions ----------------------------------------------------------------------
  def noise_multiple(self, num_samples):
    """ Returns num_samples draws of observation noise as a 1D array. """
    if self.noise_type == 'gauss':
      return np.random.normal(scale=np.sqrt(self.noise_var), size=(num_samples))
    else:
      raise NotImplementedError('Only implemented gauss noise so far. ')

  def noise_single(self):
    """ Single noise value as a float. """
    return float(self.noise_multiple(1))

  # Override evaluation functions to add noise. ------------------------------------------
  def eval_single_noiseless(self, Z, X):
    """ Evaluate at a single point without adding noise. """
    return super(NoisyMFOptFunction, self).eval_single(Z, X)

  def eval_multiple_noiseless(self, Z, X):
    """ Evaluate at multiple points without adding noise. """
    return super(NoisyMFOptFunction, self).eval_multiple(Z, X)

  def eval_single(self, Z, X):
    """ Evaluate at a single point, with additive observation noise. """
    return self.eval_single_noiseless(Z, X) + self.noise_single()

  def eval_multiple(self, Z, X):
    """ Evaluate at multiple points, with additive observation noise. """
    return self.eval_multiple_noiseless(Z, X) + self.noise_multiple(len(Z))
def get_noisy_mfof_from_mfof(mfof, noise_var, noise_type='gauss', additional_attrs=None):
  """ Wraps an MFOptFunction into a NoisyMFOptFunction with the same problem data,
      copying over any extra attributes (GPs by default) present on the original. """
  noisy = NoisyMFOptFunction(mfof.mf_func, mfof.fidel_cost_func, mfof.fidel_bounds,
                             mfof.domain_bounds, mfof.opt_fidel_unnormalised, noise_var,
                             noise_type=noise_type, vectorised=mfof.vectorised,
                             opt_pt=mfof.opt_pt, opt_val=mfof.opt_val)
  # Carry over any attached GP objects unless the caller names other attributes.
  attrs_to_copy = ['init_mfgp', 'mfgp'] if additional_attrs is None else additional_attrs
  for attr_name in attrs_to_copy:
    if hasattr(mfof, attr_name):
      setattr(noisy, attr_name, getattr(mfof, attr_name))
  return noisy
# NOisyMFOptFunction ends here ===========================================================
| 12,542 | 41.090604 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/__init__.py
|
"""
Implements methods for creating and using multi-fidelity gaussian processes.
-- [email protected]
"""
| 114 | 18.166667 | 78 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/unittest_mf_gpb_utils.py
|
"""
Unit tests for MF-GP-Bandit Utilities.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
from copy import deepcopy
from argparse import Namespace
import numpy as np
# Local
from gen_mfgp_sample import gen_simple_mfgp_as_mfof
from mf_func import MFOptFunction
import mf_gpb_utils
from utils.base_test_class import BaseTestClass, execute_tests
from unittest_mf_gp import get_init_and_post_gp
from utils.ancillary_utils import is_non_decreasing_sequence
def _get_mfgp_instances(prob_params=None):
  """ Generates a bunch of GP-UCB test instances, each with an opt_fidel attached. """
  # pylint: disable=star-args
  # The following list of lists maintains each problem instance in the following
  # order. (fidel_dim, domain_dim, num_data, kernel_scale, dim_bw_power)
  if prob_params is None:
    prob_params = [[1, 2, 40, 2, 0.5], [2, 4, 10, 1, 0], [4, 20, 40, 10, 0],
                   [3, 10, 10, 4, 0.5]]
  instances = [get_init_and_post_gp(*prob) for prob in prob_params]
  # Random near-maximum optimal fidelity for each instance ...
  for inst in instances:
    inst.opt_fidel = 0.9 + 0.1 * np.random.random((inst.fidel_dim))
  # ... except the last, which gets the exact maximum fidelity.
  instances[-1].opt_fidel = np.ones((instances[-1].fidel_dim))
  return instances
class MFGPBUtilsTestCase(BaseTestClass):
  """ Unit tests for mf_gpb_utils.py """

  def setUp(self):
    """ Sets up unit tests. Each tuple is (dim, num_samples). """
    self.lhs_data = [(1, 10), (2, 5), (4, 10), (10, 100)]

  def test_latin_hc_indices(self):
    """ Tests latin hyper-cube index generation. """
    self.report('Test Latin hyper-cube indexing. Only a sufficient condition check.')
    for data in self.lhs_data:
      # Each dimension must contain every index 0..n-1 exactly once, so each
      # column of indices sums to n*(n-1)/2.
      lhs_true_sum = data[1] * (data[1] - 1) / 2
      lhs_idxs = mf_gpb_utils.latin_hc_indices(data[0], data[1])
      lhs_idx_sums = np.array(lhs_idxs).sum(axis=0)
      assert np.all(lhs_true_sum == lhs_idx_sums)

  def test_latin_hc_sampling(self):
    """ Tests latin hyper-cube sampling. """
    self.report('Test Latin hyper-cube sampling. Only a sufficient condition check.')
    for data in self.lhs_data:
      # With one sample per stratum, the per-dimension sums are bounded by
      # (n±1)/2 (samples lie within their strata).
      lhs_max_sum = float(data[1] + 1)/2
      lhs_min_sum = float(data[1] - 1)/2
      lhs_samples = mf_gpb_utils.latin_hc_sampling(data[0], data[1])
      lhs_sample_sums = lhs_samples.sum(axis=0)
      assert lhs_sample_sums.max() <= lhs_max_sum
      assert lhs_sample_sums.min() >= lhs_min_sum
class AcquisitionTestCase(BaseTestClass):
  """ Test class for the Acquisitions: checks that the MF-GP-UCB upper confidence
      bound actually covers the function values. """

  def test_mf_gp_ucb_1(self):
    """ Tests the mf-gp-ucb acquisition using a sample from gen_mfgp_sample_as_mfof. """
    self.report('Tests mf-gp-ucb acquisition using sample from gen_simple_mfgp_as_mfof.')
    mfof = gen_simple_mfgp_as_mfof(random_seed=np.random.randint(1000))
    mfgp = deepcopy(mfof.mfgp)
    # Log-spaced checkpoints between t=5 and t=1000 (deduplicated).
    report_time_steps = set([int(x) for x in
                             np.logspace(np.log10(5), np.log10(1000), 20)])
    report_time_steps = sorted(list(report_time_steps))
    prev_time = 0
    num_test_pts = 200
    losses = []
    for t in report_time_steps:
      # Add new points to the GP
      num_new_points = t - prev_time
      Z_new = np.random.random((num_new_points, mfof.fidel_dim))
      X_new = np.random.random((num_new_points, mfof.domain_dim))
      Y_new = mfof.eval_multiple(Z_new, X_new)
      mfgp.add_mf_data(Z_new, X_new, Y_new)
      prev_time = t
      # Tests: fraction of true values at the optimal fidelity lying below the UCB.
      X_test = np.random.random((num_test_pts, mfof.domain_dim))
      opt_fidel_mat = np.repeat(mfof.opt_fidel.reshape(1, -1), num_test_pts, axis=0)
      F_test = mfof.eval_multiple(opt_fidel_mat, X_test)
      ucb_test, _ = mf_gpb_utils.acquisitions.mf_gp_ucb_multiple(X_test, mfgp,
                                                                 mfof.opt_fidel, t)
      below_ucb = F_test < ucb_test
      coverage = below_ucb.mean()
      losses.append(1-coverage)
    total_loss = sum(losses)
    assert total_loss < 0.01
    result_str = ' (DZ, DX) = (%d, %d) Loss for this instance: %0.4f'%(
                 mfof.fidel_dim, mfof.domain_dim, total_loss)
    self.report(result_str, 'test_result')

  def test_mf_gp_ucb_2(self):
    """ Tests the mf-gp-ucb acquisition on several synthetic GP instances. """
    # pylint: disable=too-many-locals
    self.report('Testing Coverage of mf-gp-ucb Acq. Probabilistic test, might fail.')
    report_time_steps = set([int(x) for x in
                             np.logspace(np.log10(5), np.log10(1000), 20)])
    report_time_steps = sorted(list(report_time_steps))
    instances = _get_mfgp_instances()
    all_losses = []
    # Now run test
    for inst in instances:
      num_test_points = 100 * inst.domain_dim
      post_gp = inst.post_gp
      inst_losses = []
      beta_th_vals = []
      prev_time = 0
      for t in report_time_steps:
        # First add new points to the GP (noisy draws from the GP itself).
        num_new_points = t - prev_time
        X_new = np.random.random((num_new_points, inst.domain_dim))
        Z_new = np.random.random((num_new_points, inst.fidel_dim))
        Y_new = (inst.post_gp.draw_mf_samples(1, Z_new, X_new).ravel() +
                 np.random.normal(0, np.sqrt(inst.noise_var), (num_new_points,)))
        inst.post_gp.add_mf_data(Z_new, X_new, Y_new, rebuild=True)
        prev_time = t
        # Now do the tests.
        assert post_gp.num_tr_data == t + inst.num_data
        X_test = np.random.random((num_test_points, inst.domain_dim))
        opt_fidel_mat = np.repeat(inst.opt_fidel.reshape(1, -1), num_test_points, axis=0)
        F_test = post_gp.draw_mf_samples(1, opt_fidel_mat, X_test).ravel()
        ucb_test, beta_th = mf_gpb_utils.acquisitions.mf_gp_ucb_multiple(X_test,
                              inst.post_gp, inst.opt_fidel, t)
        below_ucb = F_test < ucb_test
        coverage = below_ucb.mean()
        inst_losses.append(1 - coverage)
        beta_th_vals.append(beta_th)
        # manually compute coverage from mean + beta_th * std; must match the
        # acquisition's own UCB.
        mu, sigma = post_gp.eval_at_fidel(opt_fidel_mat, X_test, uncert_form='std')
        manual_conf = mu + beta_th * sigma
        manual_coverage = (F_test < manual_conf).mean()
        assert manual_coverage == coverage
      # Report results for this instance
      inst_result_str = ', '.join('%d: %0.3f (%0.2f)'%(report_time_steps[i],
                        inst_losses[i], beta_th_vals[i]) for i in range(len(report_time_steps)))
      inst_result_str = '(DZ, DX) = (%d, %d):: coverage %s'%(inst.fidel_dim,
                        inst.domain_dim, inst_result_str)
      self.report(inst_result_str, 'test_result')
      total_inst_loss = sum(inst_losses)
      all_losses.append(total_inst_loss)
      inst_avg_result_str = ' (DZ, DX) = (%d, %d) Loss for this instance: %0.4f'%(
                            inst.fidel_dim, inst.domain_dim, total_inst_loss)
      self.report(inst_avg_result_str, 'test_result')
      # beta_th must grow with t but stay under the 2*d*sqrt(log t) bound.
      assert is_non_decreasing_sequence(beta_th_vals)
      assert np.all(beta_th_vals <=
                    2 * inst.domain_dim * np.sqrt(np.log(report_time_steps)))
    # Final accumulation
    avg_inst_loss = np.array(all_losses).mean()
    loss_thresh = 0.02
    avg_loss_is_good = avg_inst_loss < loss_thresh
    self.report('Avg loss (%0.3f) is smaller than %f? %d'%(avg_inst_loss,
                loss_thresh, avg_loss_is_good), 'test_result')
    assert avg_loss_is_good
class FidelityChoosersTestCase(BaseTestClass):
  """ Test class for the fidelity choosers. """

  @classmethod
  def _get_mfof_obj(cls, fidel_dim, domain_dim, opt_fidel):
    """ Returns an MFOptFunction object with simple quadratic value/cost functions. """
    g = lambda z, x: (x**2).sum(axis=1) + ((2*z)**2).sum(axis=1)
    cost = lambda z: 1 + (z**1.5).sum(axis=1)
    vectorised = True
    fidel_bounds = np.array([[0, 1]] * fidel_dim)
    domain_bounds = np.array([[0, 1]] * domain_dim)
    return MFOptFunction(g, cost, fidel_bounds, domain_bounds, opt_fidel, vectorised)

  def test_mf_gp_ucb(self):
    """" Tests the mf-gp-ucb fidelity chooser: a chosen fidelity must either be the
         optimal fidelity or have posterior std above the method's threshold. """
    # pylint: disable=too-many-locals
    self.report('Testing mf-gp-ucb Fidelity chooser.')
    prob_params = [[1, 2, 40, 2, 0.5], [3, 10, 10, 4, 0.5], [4, 20, 100, 1.5, 0.1]]
    num_next_pts = 5
    instances = _get_mfgp_instances(prob_params)
    for inst in instances:
      next_pts = np.random.random((num_next_pts, inst.domain_dim))
      curr_time = inst.post_gp.num_tr_data
      _, beta_th = mf_gpb_utils.acquisitions.mf_gp_ucb_multiple(next_pts,
                     inst.post_gp, inst.opt_fidel, curr_time)
      # Random coefficient in [0.5, 1).
      thresh_coeff = 0.5 + 0.5 * np.random.random()
      acq_params = Namespace(beta_th=beta_th, thresh_coeff=thresh_coeff)
      for next_pt in next_pts:
        mfof = self._get_mfof_obj(inst.post_gp.fidel_dim, inst.post_gp.domain_dim,
                                  inst.opt_fidel)
        # Determine the next fidelity
        next_fidel = mf_gpb_utils.fidelity_choosers.mf_gp_ucb(next_pt,
                       inst.post_gp, mfof, acq_params)
        # Compute the threshold independently
        cost_ratio_power = 1/float(inst.post_gp.domain_dim + inst.post_gp.fidel_dim + 2)
        std_thresh = thresh_coeff * (mfof.get_cost_ratio(next_fidel) ** cost_ratio_power *
                     np.sqrt(inst.post_gp.kernel.scale) *
                     inst.post_gp.fidel_kernel.compute_std_slack(next_fidel.reshape(1, -1),
                     inst.opt_fidel.reshape(1, -1)))
        # Compute the std
        _, next_fidel_std = inst.post_gp.eval_at_fidel(next_fidel.reshape(1, -1),
                              next_pt.reshape(1, -1), uncert_form='std')
        next_fidel_std = float(next_fidel_std)
        # Test
        is_opt_fidel = mf_gpb_utils.is_an_opt_fidel_query(next_fidel, inst.opt_fidel)
        is_larger_than_thresh = next_fidel_std >= std_thresh
        self.report(('(DZ, DX, n) = (%d, %d, %d):: threshold: %0.4f, std: %0.4f ' +
                     'is_larger_than_thresh: %d, is_opt_fidel: %d')%(inst.fidel_dim,
                    inst.domain_dim, inst.post_gp.num_tr_data, std_thresh,
                    next_fidel_std, is_larger_than_thresh, is_opt_fidel),
                    'test_result')
        assert is_opt_fidel or is_larger_than_thresh


if __name__ == '__main__':
  execute_tests()
| 10,047 | 43.070175 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/mf/unittest_mf_func.py
|
"""
Unit tests for mf_func.py
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=superfluous-parens
# pylint: disable=maybe-no-member
from argparse import Namespace
import numpy as np
# Local
import mf_func
from utils.base_test_class import BaseTestClass, execute_tests
from utils.general_utils import map_to_bounds
# Functions for preparing data -----------------------------------------------------------
def get_mf_func_data():
  """ Prepares four test-problem Namespaces (two vectorised, two not), each bundling
      the raw function, cost, bounds, and the corresponding MFFunction/MFOptFunction
      wrappers. """
  # pylint: disable=too-many-locals
  # Function 1 - vectorised
  g_1 = lambda z, x: (x**2).sum(axis=1) + ((2*z)**2).sum(axis=1)
  cost = lambda z: z[:, 0] * (z[:, 1]**2)
  vectorised = True
  fidel_bounds = np.array([[1, 4], [5, 6]])
  domain_bounds = np.array([[-1, 2], [0, 1], [1, 11]])
  dz = len(fidel_bounds)
  dx = len(domain_bounds)
  opt_fidel = np.array([3.2, 5.8])
  opt_fidel_cost = float(cost(opt_fidel.reshape((1, dz))))
  mff = mf_func.MFFunction(g_1, cost, fidel_bounds, domain_bounds, vectorised)
  mfof = mf_func.MFOptFunction(g_1, cost, fidel_bounds, domain_bounds, opt_fidel,
                               vectorised)
  func_1 = Namespace(g=g_1, cost=cost, dz=dz, dx=dx, vectorised=vectorised,
                     fidel_bounds=fidel_bounds, domain_bounds=domain_bounds, mfof=mfof,
                     mff=mff, opt_fidel=opt_fidel, opt_fidel_cost=opt_fidel_cost)
  # Function 2 - Same as Function 1 but we ravel. NOTE: it reuses func_1's mff/mfof
  # objects (built on g_1); only the raw g differs.
  g_2 = lambda z, x: (g_1(z, x)).ravel()
  func_2 = Namespace(g=g_2, cost=cost, dz=dz, dx=dx, vectorised=vectorised,
                     fidel_bounds=fidel_bounds, domain_bounds=domain_bounds, mfof=mfof,
                     mff=mff, opt_fidel=opt_fidel, opt_fidel_cost=opt_fidel_cost)
  # Function 3 - not vectorised
  g_3 = lambda z, x: np.cos(z**2) * (np.sin(x)).sum()
  cost = lambda z: z[0]**3
  dz = 1
  dx = 3
  vectorised = False
  fidel_bounds = np.array([[3, 6]])
  domain_bounds = np.array([[-4, 2], [-1, 4], [21, 41]])
  # dz/dx recomputed from the bounds (the literals above are redundant).
  dz = len(fidel_bounds)
  dx = len(domain_bounds)
  opt_fidel = np.array([5.7])
  opt_fidel_cost = float(cost(opt_fidel))
  mff = mf_func.MFFunction(g_3, cost, fidel_bounds, domain_bounds, vectorised)
  mfof = mf_func.MFOptFunction(g_3, cost, fidel_bounds, domain_bounds, opt_fidel,
                               vectorised)
  func_3 = Namespace(g=g_3, cost=cost, dz=dz, dx=dx, vectorised=vectorised,
                     fidel_bounds=fidel_bounds, domain_bounds=domain_bounds, mfof=mfof,
                     mff=mff, opt_fidel=opt_fidel, opt_fidel_cost=opt_fidel_cost)
  # Function 4 - not vectorised, same as Function 3 but cast to float.
  g_4 = lambda z, x: float(g_3(z, x))
  func_4 = Namespace(g=g_4, cost=cost, dz=dz, dx=dx, vectorised=vectorised,
                     fidel_bounds=fidel_bounds, domain_bounds=domain_bounds, mfof=mfof,
                     mff=mff, opt_fidel=opt_fidel, opt_fidel_cost=opt_fidel_cost)
  # Return all functions
  return [func_1, func_2, func_3, func_4]
# Some functions we will need for testing ------------------------------------------------
def _get_test_points(dz, dx, z_bounds, x_bounds, n=5):
  """ Draws random fidelity/domain test points.

  Returns points in normalised [0,1] coordinates and their images under
  map_to_bounds, both as single points (vectors) and as batches of n points
  (n-row matrices), in the order (single_nz, single_nx, single_z, single_x,
  mult_nz, mult_nx, mult_z, mult_x).
  """
  # Draw in the same RNG order as before: single z, single x, batch z, batch x.
  single_nz, single_nx = np.random.random(dz), np.random.random(dx)
  mult_nz, mult_nx = np.random.random((n, dz)), np.random.random((n, dx))
  # Map the normalised draws onto the actual fidelity/domain bounds.
  single_z, mult_z = [map_to_bounds(pts, z_bounds) for pts in (single_nz, mult_nz)]
  single_x, mult_x = [map_to_bounds(pts, x_bounds) for pts in (single_nx, mult_nx)]
  return (single_nz, single_nx, single_z, single_x,
          mult_nz, mult_nx, mult_z, mult_x)
def _get_gvals(single_z, single_x, mult_z, mult_x, func):
""" Evaluates the function at the test points and returns the values. """
if func.vectorised:
single_gvals = float(func.g(single_z.reshape((1, func.dz)),
single_x.reshape((1, func.dx))))
mult_gvals = func.g(mult_z, mult_x).ravel()
else:
single_gvals = float(func.g(single_z, single_x))
mult_gvals = []
for i in range(len(mult_z)):
mult_gvals.append(float(func.g(mult_z[i, :], mult_x[i, :])))
mult_gvals = np.array(mult_gvals)
return single_gvals, mult_gvals
def _get_mff_vals_unnorm(single_z, single_x, mult_z, mult_x, func):
""" Evaluates mff at the test points with unnormalised coordiantes. """
single_mff_vals = func.mff.eval_at_fidel_single_point(single_z, single_x)
mult_mff_vals = func.mff.eval_at_fidel_multiple_points(mult_z, mult_x)
return single_mff_vals, mult_mff_vals
def _get_mff_vals_norm(single_nz, single_nx, mult_nz, mult_nx, func):
""" Evaluates mff at the test points with unnormalised coordiantes. """
single_mff_vals = func.mff.eval_at_fidel_single_point_normalised(single_nz, single_nx)
mult_mff_vals = func.mff.eval_at_fidel_multiple_points_normalised(mult_nz, mult_nx)
return single_mff_vals, mult_mff_vals
def _get_cost_vals(single_z, mult_z, func):
""" Evaluates the function at the test points and returns the values. """
if func.vectorised:
single_gvals = float(func.cost(single_z.reshape((1, func.dz))))
mult_gvals = func.cost(mult_z).ravel()
else:
single_gvals = float(func.cost(single_z))
mult_gvals = []
for i in range(len(mult_z)):
mult_gvals.append(func.cost(mult_z[i, :]))
mult_gvals = np.array(mult_gvals)
return single_gvals, mult_gvals
def _get_mff_cost_vals_unnorm(single_z, mult_z, func):
""" Evaluates mff for cost at the test points with unnormalised coordiantes. """
single_mff_cost_vals = func.mff.eval_fidel_cost_single_point(single_z)
mult_mff_cost_vals = func.mff.eval_fidel_cost_multiple_points(mult_z)
return single_mff_cost_vals, mult_mff_cost_vals
def _get_mff_cost_vals_norm(single_nz, mult_nz, func):
""" Evaluates mff for cost at the test points with unnormalised coordiantes. """
single_mff_cost_vals = func.mff.eval_fidel_cost_single_point_normalised(single_nz)
mult_mff_cost_vals = func.mff.eval_fidel_cost_multiple_points_normalised(mult_nz)
return single_mff_cost_vals, mult_mff_cost_vals
# Test Cases -----------------------------------------------------------------------------
class MFFunctionTestCase(BaseTestClass):
  """ Unit tests for MFFunction.

  Checks that MFFunction's normalised and unnormalised evaluation wrappers
  (for both g and the cost function) agree with calling the underlying
  callables directly, at single points and at batches of points.
  """
  # pylint: disable=too-many-locals

  def setUp(self):
    """ Set up for the tests. """
    # Each entry is a Namespace bundling g, cost, bounds and the wrapping
    # MFFunction/MFOptFunction objects built by get_mf_func_data().
    self.functions = get_mf_func_data()

  def test_eval(self):
    """ Tests evaluation at single and multiple points using normalised and unnormalised
        coordinates """
    self.report(('Test eval at single/multiple points using normalised/unnormalised ' +
                 'coordinates.'))
    for func in self.functions:
      single_nz, single_nx, single_z, single_x, mult_nz, mult_nx, mult_z, mult_x = \
        _get_test_points(func.dz, func.dx, func.fidel_bounds, func.domain_bounds)
      # Ground truth: evaluate g directly at the unnormalised points.
      single_gvals, mult_gvals = _get_gvals(single_z, single_x, mult_z, mult_x, func)
      single_n_mffvals, mult_n_mffvals = _get_mff_vals_norm(single_nz, single_nx,
                                                            mult_nz, mult_nx, func)
      single_mffvals, mult_mffvals = _get_mff_vals_unnorm(single_z, single_x,
                                                          mult_z, mult_x, func)
      # Both coordinate systems must reproduce the direct evaluations.
      assert abs(single_n_mffvals - single_gvals) < 1e-5
      assert abs(single_mffvals - single_gvals) < 1e-5
      assert np.linalg.norm(mult_n_mffvals - mult_gvals) < 1e-5
      assert np.linalg.norm(mult_mffvals - mult_gvals) < 1e-5

  def test_cost_eval(self):
    """ Tests evaluation of the cost function at single and multiple points using
        normalised and unnormalised coordinates """
    self.report(('Test evaluation of cost function at single/multiple points using' +
                 ' normalised/unnormalised coordinates.'))
    for func in self.functions:
      single_nz, _, single_z, _, mult_nz, _, mult_z, _ = \
        _get_test_points(func.dz, func.dx, func.fidel_bounds, func.domain_bounds)
      # Ground truth: evaluate the cost callable directly.
      single_cost_vals, mult_cost_vals = _get_cost_vals(single_z, mult_z, func)
      single_n_mff_cost_vals, mult_n_mff_cost_vals = _get_mff_cost_vals_norm(single_nz,
                                                       mult_nz, func)
      single_mff_cost_vals, mult_mff_cost_vals = _get_mff_cost_vals_unnorm(single_z,
                                                   mult_z, func)
      assert abs(single_n_mff_cost_vals - single_cost_vals) < 1e-5
      assert abs(single_mff_cost_vals - single_cost_vals) < 1e-5
      assert np.linalg.norm(mult_n_mff_cost_vals - mult_cost_vals) < 1e-5
      assert np.linalg.norm(mult_mff_cost_vals - mult_cost_vals) < 1e-5
class MFOptFunctionTestCase(BaseTestClass):
  """ Unit tests for MFOptFunction.

  Exercises the optimisation wrapper: cost ratios relative to the optimal
  fidelity, normalised-coordinate evaluation, and the candidate-fidelity
  generation with and without cost filtering.
  """
  # pylint: disable=too-many-locals

  def setUp(self):
    """ Set up for the tests. """
    self.functions = get_mf_func_data()

  def test_cost_ratio(self):
    """ Tests evaluation of cost ratio. """
    self.report('Testing cost ratio.')
    for func in self.functions:
      single_nz, _, single_z, _, mult_nz, _, mult_z, _ = \
        _get_test_points(func.dz, func.dx, func.fidel_bounds, func.domain_bounds)
      single_cost_vals, mult_cost_vals = _get_cost_vals(single_z, mult_z, func)
      # Expected ratios: cost at the point divided by cost at the opt fidelity.
      single_cost_ratios = single_cost_vals / func.opt_fidel_cost
      mult_cost_ratios = mult_cost_vals / func.opt_fidel_cost
      # get_cost_ratio takes normalised fidelity coordinates.
      single_mff_crs = func.mfof.get_cost_ratio(single_nz)
      mult_mff_crs = func.mfof.get_cost_ratio(mult_nz)
      assert abs(single_mff_crs - single_cost_ratios) < 1e-5
      assert np.linalg.norm(mult_mff_crs - mult_cost_ratios) < 1e-5

  def test_eval(self):
    """ Tests evaluation. """
    self.report('Testing evaluation.')
    for func in self.functions:
      single_nz, single_nx, single_z, single_x, mult_nz, mult_nx, mult_z, mult_x = \
        _get_test_points(func.dz, func.dx, func.fidel_bounds, func.domain_bounds)
      single_gvals, mult_gvals = _get_gvals(single_z, single_x, mult_z, mult_x, func)
      # mfof.eval works in normalised coordinates but must match direct g calls.
      single_n_mffvals = func.mfof.eval(single_nz, single_nx)
      mult_n_mffvals = func.mfof.eval(mult_nz, mult_nx)
      assert abs(single_n_mffvals - single_gvals) < 1e-5
      assert np.linalg.norm(mult_n_mffvals - mult_gvals) < 1e-5

  def test_get_candidate_fidels(self):
    """ Tests obtaining candidate fidelities. """
    self.report('Testing obtaining of candidate fidelities.')
    mf_g = self.functions[0].g  # This has to be vectorised !!!
    domain_bounds = self.functions[0].domain_bounds
    dim_vals = [1, 2, 3, 5, 10]
    for dim in dim_vals:
      fidel_bounds = [[0, 1]] * dim
      # Random opt fidelity strictly inside the unit cube ([0.5, 0.8] per axis).
      opt_fidel = np.random.random(dim) * 0.3 + 0.5
      mf_cost = lambda z: (z**1.5 * (np.array(range(dim)) + 0.1)).sum(axis=1)
      mfof = mf_func.MFOptFunction(mf_g, mf_cost, fidel_bounds, domain_bounds, opt_fidel,
                                   vectorised=True)
      filt_candidates = mfof.get_candidate_fidelities()
      raw_candidates = mfof.get_candidate_fidelities(filter_by_cost=False)
      num_filt_cands = len(filt_candidates)
      num_raw_cands = len(raw_candidates)
      filt_cost_ratios = mfof.get_cost_ratio(filt_candidates)
      filt_equal = (filt_cost_ratios == 1.0).sum()
      filt_less = (filt_cost_ratios < 1.0).sum()
      # Tests: candidates are 2-D arrays of the right width; cost filtering can
      # only shrink the set; exactly one filtered candidate (the opt fidelity)
      # has ratio 1 and all others are strictly cheaper.
      assert len(filt_candidates.shape) == 2
      assert len(raw_candidates.shape) == 2
      assert filt_candidates.shape[1] == mfof.fidel_dim
      assert raw_candidates.shape[1] == mfof.fidel_dim
      assert num_filt_cands <= num_raw_cands
      assert filt_equal == 1
      assert filt_less == num_filt_cands - 1
class NoisyMFOptFunctionTestCase(BaseTestClass):
  """ Unit tests for the NoisyMFOptFunction class.

  Statistical checks on the Gaussian-noise wrapper: single noisy evaluations
  stay within a few standard deviations of the true value, and the empirical
  noise level over a large batch matches the requested one.
  """

  def setUp(self):
    """ Set up for the tests. """
    self.functions = get_mf_func_data()

  def test_noisy_eval(self):
    """ Tests evaluation. """
    self.report('Testing Noisy evaluation. Probabilisitic test, might fail.')
    for func in self.functions:
      # Random noise variance in [0.2, 0.5).
      curr_noise_var = 0.2 + 0.3 * np.random.random()
      curr_noise_std = np.sqrt(curr_noise_var)
      # Large batch (n=10000) so the empirical std estimate is tight.
      single_nz, single_nx, single_z, single_x, mult_nz, mult_nx, mult_z, mult_x = \
        _get_test_points(func.dz, func.dx, func.fidel_bounds, func.domain_bounds, n=10000)
      single_gvals, mult_gvals = _get_gvals(single_z, single_x, mult_z, mult_x, func)
      # Now get noisy values
      noisy_mfof = mf_func.get_noisy_mfof_from_mfof(func.mfof, curr_noise_var, 'gauss')
      noisy_single_n_mffvals = noisy_mfof.eval(single_nz, single_nx)
      noisy_mult_n_mffvals = noisy_mfof.eval(mult_nz, mult_nx)
      mult_diff_std = (noisy_mult_n_mffvals - mult_gvals).std()
      self.report('Noisy test single: true: %0.4f, noisy: %0.4f'%(single_gvals,
                  noisy_single_n_mffvals), 'test_result')
      self.report('Noisy test multiple: true-std: %0.4f, est-std: %0.4f'%(
                  curr_noise_std, mult_diff_std), 'test_result')
      # 5-sigma bound for the single draw; 0.05 absolute tolerance on the std.
      assert abs(noisy_single_n_mffvals - single_gvals) < 5 * curr_noise_std
      assert abs(mult_diff_std - curr_noise_std) < 0.05
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  execute_tests()
| 13,043 | 45.091873 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/experiment_synthetic.py
|
"""
Running experiments for the synthetic functions.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
from argparse import Namespace
import numpy as np
# Local imports
from mf.mf_gp_bandit import all_mf_gp_bandit_args
from mf.mfopt_experimenters import MFOptExperimenter
from mf.mf_func import get_noisy_mfof_from_mfof
from mf.gen_mfgp_sample import gen_simple_mfgp_as_mfof
import synthetic_functions
from utils.option_handler import load_options
from utils.reporters import get_reporter
# Whether evaluations are corrupted with observation noise.
#NOISY = True
NOISY = False
# Debug or not
#IS_DEBUG = True
IS_DEBUG = False
# Choose experiment (uncomment exactly one EXP_NAME).
# EXP_NAME = 'GP'
# EXP_NAME = 'GP-Bad-Approx'
#EXP_NAME = 'Hartmann3'
# EXP_NAME = 'Hartmann3b'
# EXP_NAME = 'Hartmann3c'
# EXP_NAME = 'Hartmann6'
# EXP_NAME = 'Hartmann6b'
# EXP_NAME = 'CurrinExp'
#EXP_NAME = 'Branin'
EXP_NAME = 'Borehole'
# Set parameters
# Methods that optimise over a continuous fidelity space.
# NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei', 'direct']
NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei']
# NONFINITE_METHODS = ['gp_ucb']
# NONFINITE_METHODS = ['gp_ucb', 'direct']
# Methods that use the finite fidelity set attached in get_problem_parameters.
FINITE_METHODS = ['mf_gp_ucb_finite', 'mf_sko']
# FINITE_METHODS = ['mf_gp_ucb_finite']
# FINITE_METHODS = []
# Number of independent repetitions and where results are written.
NUM_EXPERIMENTS = 10
SAVE_RESULTS_DIR = './examples/results'
def get_problem_parameters(options):
  """ Builds the problem parameters for the experiment selected by EXP_NAME.

  Constructs the multi-fidelity optimisation function (mfof), the total
  capital (budget) and the observation-noise variance for the chosen
  benchmark, attaches the finite-fidelity settings to `options`, and wraps
  the function with noise when NOISY is set.

  Args:
    options: Namespace of method options; finite_fidels fields are added.
  Returns:
    (prob, options): prob is a Namespace with fields mfof, max_capital,
    noisy, num_experiments and experiment_name.
  Raises:
    ValueError: if EXP_NAME is not a known experiment name.
  """
  if EXP_NAME == 'GP':
    mfof = gen_simple_mfgp_as_mfof(fidel_bw=1)
    mfof.init_mfgp = mfof.mfgp
    max_capital = 20 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'GP-Bad-Approx':
    # Narrow fidelity bandwidth => low fidelities approximate the target badly.
    mfof = gen_simple_mfgp_as_mfof(fidel_bw=0.01)
    mfof.init_mfgp = mfof.mfgp
    max_capital = 20 * mfof.opt_fidel_cost
    noise_var = 0.01
  elif EXP_NAME == 'Hartmann3':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.01
  elif EXP_NAME == 'Hartmann3b':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(2, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann3c':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(4, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann6':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 6)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann6b':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(2, 6)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'CurrinExp':
    mfof = synthetic_functions.get_mf_currin_exp_as_mfof()
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.5
  elif EXP_NAME == 'Branin':
    mfof = synthetic_functions.get_mf_branin_as_mfof(3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Borehole':
    mfof = synthetic_functions.get_mf_borehole_as_mfof()
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 5
  else:
    # Fix: previously an unknown EXP_NAME fell through silently and raised a
    # confusing NameError on max_capital below; fail fast with a clear message.
    raise ValueError('Unknown EXP_NAME: %s'%(EXP_NAME))
  # Add finite fidels: two fixed fidelities (in normalised coordinates) for
  # the finite-fidelity methods.
  options.finite_fidels = np.array([[0.333] * mfof.fidel_dim, [0.667] * mfof.fidel_dim])
  options.finite_fidels_is_normalised = True
  # If NOISY, wrap the function so evaluations return noisy observations.
  if NOISY:
    mfof = get_noisy_mfof_from_mfof(mfof, noise_var)
  # Debug runs use a much smaller budget and fewer repetitions.
  if IS_DEBUG:
    max_capital = 20 * mfof.opt_fidel_cost
    num_experiments = 3
    experiment_name = 'debug-%s'%(EXP_NAME)
  else:
    experiment_name = EXP_NAME
    num_experiments = NUM_EXPERIMENTS
  # Return everything in this namespace.  (The dead `prob = Namespace()`
  # initialisation that was immediately overwritten has been removed.)
  prob = Namespace(mfof=mfof, max_capital=max_capital, noisy=NOISY,
                   num_experiments=num_experiments, experiment_name=experiment_name)
  return prob, options
def main():
  """ Entry point: loads options, builds the problem and runs every method on it. """
  options = load_options(all_mf_gp_bandit_args)
  prob, options = get_problem_parameters(options)
  # All methods share the same option namespace.
  methods = NONFINITE_METHODS + FINITE_METHODS
  per_method_options = dict((meth, options) for meth in methods)
  # The save-file prefix encodes experiment, noise setting and dimensions.
  if NOISY:
    noise_tag = 'noisy%0.3f'%(prob.mfof.noise_var)
  else:
    noise_tag = 'noiseless'
  file_prefix = '%s-%s-p%d-d%d'%(prob.experiment_name, noise_tag,
                                 prob.mfof.fidel_dim, prob.mfof.domain_dim)
  rep = get_reporter('default')
  experimenter = MFOptExperimenter(prob.experiment_name, prob.mfof, prob.max_capital,
                                   methods, prob.num_experiments, SAVE_RESULTS_DIR,
                                   save_file_prefix=file_prefix,
                                   method_options=per_method_options,
                                   method_reporter=rep,
                                   reporter=rep)
  experimenter.run_experiments()
# Run the experiment suite when executed as a script.
if __name__ == '__main__':
  main()
| 4,771 | 31.462585 | 88 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/experiment_synth_POO.py
|
import numpy as np
import Queue
from mf.mf_func import MFOptFunction
from utils.general_utils import map_to_cube
import sys
from examples.synthetic_functions import *
from mf.mf_func import get_noisy_mfof_from_mfof
from letters.letters_classifier import *
import time
from MFTree.POO import *
import synthetic_functions
import pandas as pd  # required: the __main__ block builds a pd.DataFrame for the CSV
# Number of independent repetitions of the experiment.
NUM_EXP = 5
# Benchmark to run; uncomment exactly one EXP_NAME.
EXP_NAME = 'Branin'
#EXP_NAME = 'CurrinExp'
#EXP_NAME = 'Hartmann3'
#EXP_NAME = 'Hartmann6'
#EXP_NAME = 'Borehole'
def run_one_experiment(mfobject,nu,rho,times,sigma,C,t0,filname):
R = []
T = []
for t in times:
budget = t*mfobject.opt_fidel_cost
t1 = time.time()
MP = POO(mfobject=mfobject, nu_max=nu, rho_max=rho, total_budget=budget, sigma=sigma, C=C, mult=0.5, tol = 1e-3, Randomize = False, Auto = False, unit_cost=t0 )
MP.run_all_MFHOO()
X, E = MP.get_point()
t2 = time.time()
R = R + [E]
T = T + [MP.cost]
#print 'Total HOO Queries: ' + str(MP.t)
print str(MP.cost) + ' : ' + str(E)
np.save(filename,R)
return np.array(R),np.array(T)
# Script driver: builds the chosen noisy benchmark, repeats the POO experiment
# NUM_EXP times, and writes per-run .npy checkpoints plus a summary CSV.
if __name__ == '__main__':
	# Build the benchmark mfof and its noise level for the selected EXP_NAME.
	# NOTE(review): an unrecognised EXP_NAME leaves mfof/noise_var unbound and
	# crashes below -- confirm EXP_NAME is always one of the listed names.
	if EXP_NAME == 'Hartmann3':
		mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 3)
		noise_var = 0.01
		sigma = np.sqrt(noise_var)
	elif EXP_NAME == 'Hartmann6':
		mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 6)
		max_capital = 200 * mfof.opt_fidel_cost
		noise_var = 0.05
		sigma = np.sqrt(noise_var)
	elif EXP_NAME == 'CurrinExp':
		mfof = synthetic_functions.get_mf_currin_exp_as_mfof()
		max_capital = 200 * mfof.opt_fidel_cost
		noise_var = 0.5
		sigma = np.sqrt(noise_var)
	elif EXP_NAME == 'Branin':
		mfof = synthetic_functions.get_mf_branin_as_mfof(1)
		max_capital = 200 * mfof.opt_fidel_cost
		noise_var = 0.05
		sigma = np.sqrt(noise_var)
	elif EXP_NAME == 'Borehole':
		mfof = synthetic_functions.get_mf_borehole_as_mfof()
		max_capital = 200 * mfof.opt_fidel_cost
		noise_var = 5
		sigma = np.sqrt(noise_var)
	# Capital multiples (of the opt-fidelity cost) at which POO is evaluated.
	times = [10,20,50,75,100,150,175, 200]
	mfobject = get_noisy_mfof_from_mfof(mfof, noise_var)
	# POO / MFHOO hyper-parameters.
	nu = 1.0
	rho = 0.95
	C = 0.02
	t0 = mfobject.opt_fidel_cost
	# Timestamp tag so repeated invocations do not overwrite each other.
	NT = str(time.time())
	print 'Running Experiment 1: '
	filename = 'POO_' + EXP_NAME + '_' + NT + '_' + '1.npy'
	R,T = run_one_experiment(mfobject,nu,rho,times,sigma,C,t0,filename)
	result = R
	for i in range(1,NUM_EXP):
		print 'Running Experiment' + str(i+1) + ': '
		filename = 'POO_' + EXP_NAME + '_' + NT + '_' + str(i+1) + '.npy'
		R,T = run_one_experiment(mfobject,nu,rho,times,sigma,C,t0,filename)
		result = np.vstack([result,R])
	# Aggregate across repetitions: mean simple regret and its std per budget.
	mu = np.mean(result,axis = 0)
	std = np.std(result,axis = 0)
	result = mfobject.opt_val - mu
	filename = './examples/results/POO_' + EXP_NAME + '_' + NT + '_' + '.csv'
	# NOTE(review): `pd` requires `import pandas as pd` in this module's import
	# block -- it was missing from the original imports; verify it is present.
	dfdic = {}
	dfdic['Capital'] = np.array(times)
	dfdic['Value'] = result
	dfdic['Std'] = std
	df = pd.DataFrame(dfdic)
	df.to_csv(filename)
| 2,791 | 24.851852 | 162 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/experiment_synthetic_curin.py
|
"""
Running experiments for the synthetic functions.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
from argparse import Namespace
import numpy as np
# Local imports
from mf.mf_gp_bandit import all_mf_gp_bandit_args
from mf.mfopt_experimenters import MFOptExperimenter
from mf.mf_func import get_noisy_mfof_from_mfof
from mf.gen_mfgp_sample import gen_simple_mfgp_as_mfof
import synthetic_functions
from utils.option_handler import load_options
from utils.reporters import get_reporter
# Whether evaluations are corrupted with observation noise.
NOISY = True
#NOISY = False
# Debug or not
#IS_DEBUG = True
IS_DEBUG = False
# Choose experiment (uncomment exactly one EXP_NAME).
# EXP_NAME = 'GP'
# EXP_NAME = 'GP-Bad-Approx'
# EXP_NAME = 'Hartmann3'
# EXP_NAME = 'Hartmann3b'
# EXP_NAME = 'Hartmann3c'
#EXP_NAME = 'Hartmann6'
# EXP_NAME = 'Hartmann6b'
EXP_NAME = 'CurrinExp'
#EXP_NAME = 'Branin'
#EXP_NAME = 'Borehole'
# Set parameters
# Methods that optimise over a continuous fidelity space.
# NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei', 'direct']
NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei']
# NONFINITE_METHODS = ['gp_ucb']
# NONFINITE_METHODS = ['gp_ucb', 'direct']
# Methods that use the finite fidelity set attached in get_problem_parameters.
FINITE_METHODS = ['mf_gp_ucb_finite', 'mf_sko']
# FINITE_METHODS = ['mf_gp_ucb_finite']
# FINITE_METHODS = []
# Number of independent repetitions and where results are written.
NUM_EXPERIMENTS = 10
SAVE_RESULTS_DIR = './examples/results'
def get_problem_parameters(options):
  """ Returns the problem parameters.

  Builds the multi-fidelity function (mfof), budget and noise variance for
  the benchmark named by EXP_NAME, attaches finite-fidelity settings to
  `options`, and wraps the function with noise when NOISY is set.
  NOTE(review): an unknown EXP_NAME falls through every branch and raises a
  NameError on max_capital below -- confirm EXP_NAME is one of the names above.
  """
  prob = Namespace()
  if EXP_NAME == 'GP':
    mfof = gen_simple_mfgp_as_mfof(fidel_bw=1)
    mfof.init_mfgp = mfof.mfgp
    max_capital = 20 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'GP-Bad-Approx':
    mfof = gen_simple_mfgp_as_mfof(fidel_bw=0.01)
    mfof.init_mfgp = mfof.mfgp
    max_capital = 20 * mfof.opt_fidel_cost
    noise_var = 0.01
  elif EXP_NAME == 'Hartmann3':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.01
  elif EXP_NAME == 'Hartmann3b':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(2, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann3c':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(4, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann6':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 6)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann6b':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(2, 6)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'CurrinExp':
    mfof = synthetic_functions.get_mf_currin_exp_as_mfof()
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.5
  elif EXP_NAME == 'Branin':
    mfof = synthetic_functions.get_mf_branin_as_mfof(1)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Borehole':
    mfof = synthetic_functions.get_mf_borehole_as_mfof()
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 5
  # Add finite fidels (two fixed normalised fidelities for the finite methods).
  options.finite_fidels = np.array([[0.333] * mfof.fidel_dim, [0.667] * mfof.fidel_dim])
  options.finite_fidels_is_normalised = True
  # If NOISY, get noisy version
  if NOISY:
    mfof = get_noisy_mfof_from_mfof(mfof, noise_var)
  # is debug: shrink the budget and number of repetitions.
  if IS_DEBUG:
    max_capital = 20 * mfof.opt_fidel_cost
    num_experiments = 3
    experiment_name = 'debug-%s'%(EXP_NAME)
  else:
    experiment_name = EXP_NAME
    num_experiments = NUM_EXPERIMENTS
  # Return everything in this namespace
  prob = Namespace(mfof=mfof, max_capital=max_capital, noisy=NOISY,
                   num_experiments=num_experiments, experiment_name=experiment_name)
  return prob, options
def main():
  """ Entry point: loads options, builds the problem and runs all methods on it. """
  options = load_options(all_mf_gp_bandit_args)
  prob, options = get_problem_parameters(options)
  # Set other variables
  all_methods = NONFINITE_METHODS + FINITE_METHODS
  method_options = {key: options for key in all_methods}  # same options for every method
  noisy_str = 'noiseless' if not NOISY else 'noisy%0.3f'%(prob.mfof.noise_var)
  # The file prefix encodes experiment name, noise setting and dimensionalities.
  save_file_prefix = '%s-%s-p%d-d%d'%(prob.experiment_name, noisy_str,
                                      prob.mfof.fidel_dim,
                                      prob.mfof.domain_dim)
  reporter = get_reporter('default')
  experimenter = MFOptExperimenter(prob.experiment_name, prob.mfof, prob.max_capital,
                                   all_methods, prob.num_experiments, SAVE_RESULTS_DIR,
                                   save_file_prefix=save_file_prefix,
                                   method_options=method_options,
                                   method_reporter=reporter,
                                   reporter=reporter)
  experimenter.run_experiments()
# Run the experiment suite when executed as a script.
if __name__ == '__main__':
  main()
| 4,770 | 31.455782 | 88 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/experiment_synthetic_borehole.py
|
"""
Running experiments for the synthetic functions.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
from argparse import Namespace
import numpy as np
# Local imports
from mf.mf_gp_bandit import all_mf_gp_bandit_args
from mf.mfopt_experimenters import MFOptExperimenter
from mf.mf_func import get_noisy_mfof_from_mfof
from mf.gen_mfgp_sample import gen_simple_mfgp_as_mfof
import synthetic_functions
from utils.option_handler import load_options
from utils.reporters import get_reporter
# Whether evaluations are corrupted with observation noise.
NOISY = True
#NOISY = False
# Debug or not
#IS_DEBUG = True
IS_DEBUG = False
# Choose experiment (uncomment exactly one EXP_NAME).
# NOTE(review): this file is named ..._borehole.py but EXP_NAME selects
# 'Branin' -- confirm this is the intended configuration.
# EXP_NAME = 'GP'
# EXP_NAME = 'GP-Bad-Approx'
# EXP_NAME = 'Hartmann3'
# EXP_NAME = 'Hartmann3b'
# EXP_NAME = 'Hartmann3c'
#EXP_NAME = 'Hartmann6'
# EXP_NAME = 'Hartmann6b'
#EXP_NAME = 'CurrinExp'
EXP_NAME = 'Branin'
#EXP_NAME = 'Borehole'
# Set parameters
# Methods that optimise over a continuous fidelity space.
# NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei', 'direct']
NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei']
# NONFINITE_METHODS = ['gp_ucb']
# NONFINITE_METHODS = ['gp_ucb', 'direct']
# Methods that use the finite fidelity set attached in get_problem_parameters.
FINITE_METHODS = ['mf_gp_ucb_finite', 'mf_sko']
# FINITE_METHODS = ['mf_gp_ucb_finite']
# FINITE_METHODS = []
# Number of independent repetitions and where results are written.
NUM_EXPERIMENTS = 10
SAVE_RESULTS_DIR = './examples/results'
def get_problem_parameters(options):
  """ Returns the problem parameters.

  Builds the multi-fidelity function (mfof), budget and noise variance for
  the benchmark named by EXP_NAME, attaches finite-fidelity settings to
  `options`, and wraps the function with noise when NOISY is set.
  NOTE(review): an unknown EXP_NAME falls through every branch and raises a
  NameError on max_capital below -- confirm EXP_NAME is one of the names above.
  """
  prob = Namespace()
  if EXP_NAME == 'GP':
    mfof = gen_simple_mfgp_as_mfof(fidel_bw=1)
    mfof.init_mfgp = mfof.mfgp
    max_capital = 20 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'GP-Bad-Approx':
    mfof = gen_simple_mfgp_as_mfof(fidel_bw=0.01)
    mfof.init_mfgp = mfof.mfgp
    max_capital = 20 * mfof.opt_fidel_cost
    noise_var = 0.01
  elif EXP_NAME == 'Hartmann3':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.01
  elif EXP_NAME == 'Hartmann3b':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(2, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann3c':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(4, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann6':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 6)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann6b':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(2, 6)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'CurrinExp':
    mfof = synthetic_functions.get_mf_currin_exp_as_mfof()
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.5
  elif EXP_NAME == 'Branin':
    mfof = synthetic_functions.get_mf_branin_as_mfof(1)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Borehole':
    mfof = synthetic_functions.get_mf_borehole_as_mfof()
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 5
  # Add finite fidels (two fixed normalised fidelities for the finite methods).
  options.finite_fidels = np.array([[0.333] * mfof.fidel_dim, [0.667] * mfof.fidel_dim])
  options.finite_fidels_is_normalised = True
  # If NOISY, get noisy version
  if NOISY:
    mfof = get_noisy_mfof_from_mfof(mfof, noise_var)
  # is debug: shrink the budget and number of repetitions.
  if IS_DEBUG:
    max_capital = 20 * mfof.opt_fidel_cost
    num_experiments = 3
    experiment_name = 'debug-%s'%(EXP_NAME)
  else:
    experiment_name = EXP_NAME
    num_experiments = NUM_EXPERIMENTS
  # Return everything in this namespace
  prob = Namespace(mfof=mfof, max_capital=max_capital, noisy=NOISY,
                   num_experiments=num_experiments, experiment_name=experiment_name)
  return prob, options
def main():
  """ Entry point: loads options, builds the problem and runs all methods on it. """
  options = load_options(all_mf_gp_bandit_args)
  prob, options = get_problem_parameters(options)
  # Set other variables
  all_methods = NONFINITE_METHODS + FINITE_METHODS
  method_options = {key: options for key in all_methods}  # same options for every method
  noisy_str = 'noiseless' if not NOISY else 'noisy%0.3f'%(prob.mfof.noise_var)
  # The file prefix encodes experiment name, noise setting and dimensionalities.
  save_file_prefix = '%s-%s-p%d-d%d'%(prob.experiment_name, noisy_str,
                                      prob.mfof.fidel_dim,
                                      prob.mfof.domain_dim)
  reporter = get_reporter('default')
  experimenter = MFOptExperimenter(prob.experiment_name, prob.mfof, prob.max_capital,
                                   all_methods, prob.num_experiments, SAVE_RESULTS_DIR,
                                   save_file_prefix=save_file_prefix,
                                   method_options=method_options,
                                   method_reporter=reporter,
                                   reporter=reporter)
  experimenter.run_experiments()
# Run the experiment suite when executed as a script.
if __name__ == '__main__':
  main()
| 4,770 | 31.455782 | 88 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/unittest_synthetic_functions.py
|
"""
Test cases for the functions in synthetic_functions.py
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
import numpy as np
from synthetic_functions import get_mf_hartmann_as_mfof
from utils.base_test_class import BaseTestClass, execute_tests
class SyntheticExamplesTestCase(BaseTestClass):
  """Unit test class for the synthetic multi-fidelity Hartmann functions. """

  def __init__(self, *args, **kwargs):
    super(SyntheticExamplesTestCase, self).__init__(*args, **kwargs)

  def test_hartmann(self):
    """ Unit tests for the hartmann function. """
    self.report('Testing Hartmann function in 3 and 6 dimensions.')
    # The data are in the following order: (domain_dim, fidel_dim).
    test_data = [(3, 1), (6, 1), (3, 2), (6, 4)]
    for data in test_data:
      fidel_dim = data[1]
      domain_dim = data[0]
      num_test_pts = 1000 * domain_dim
      mfof = get_mf_hartmann_as_mfof(fidel_dim, domain_dim)
      # True max value (known optima of the 3-D and 6-D Hartmann functions).
      if domain_dim == 3:
        true_opt_val = 3.86278
      elif domain_dim == 6:
        true_opt_val = 3.322368
      else:
        # NOTE(review): unreachable for the test_data above; if it were
        # reached, `del` on an unbound name would itself raise NameError.
        del true_opt_val
      computed_opt_val = mfof.eval_single(mfof.opt_fidel, mfof.opt_pt)
      # Evaluate at random points at the highest fidelity.
      X_test = np.random.random((num_test_pts, domain_dim))
      opt_fidel_mat = np.repeat(mfof.opt_fidel.reshape(1, -1), num_test_pts, axis=0)
      F_high_test = mfof.eval_multiple(opt_fidel_mat, X_test)
      max_F_high_test = max(F_high_test)
      # Tests across multiple fidelities: the mid fidelity should approximate
      # the highest fidelity better than the lowest fidelity does.
      fidel_mid_mat = 0.5 * np.ones((num_test_pts, fidel_dim))
      fidel_low_mat = np.zeros((num_test_pts, fidel_dim))
      F_mid_test = mfof.eval_multiple(fidel_mid_mat, X_test)
      F_low_test = mfof.eval_multiple(fidel_low_mat, X_test)
      diff_mid = np.linalg.norm(F_high_test - F_mid_test) / num_test_pts
      diff_low = np.linalg.norm(F_high_test - F_low_test) / num_test_pts
      # Tests
      self.report(('(DZ, DX)=(%d, %d):: max(true, computed, test): (%f, %f, %f), ' +
                   'diff_mid: %0.4f, diff_low: %0.4f')%(fidel_dim, domain_dim,
                   true_opt_val, computed_opt_val, max_F_high_test, diff_mid, diff_low),
                  'test_result')
      assert np.abs(true_opt_val - computed_opt_val) < 1e-5
      assert computed_opt_val > max_F_high_test
      assert diff_mid < diff_low
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  execute_tests()
| 2,491 | 36.19403 | 87 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/experiment_synthetic_barin.py
|
"""
Running experiments for the synthetic functions.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
from argparse import Namespace
import numpy as np
# Local imports
from mf.mf_gp_bandit import all_mf_gp_bandit_args
from mf.mfopt_experimenters import MFOptExperimenter
from mf.mf_func import get_noisy_mfof_from_mfof
from mf.gen_mfgp_sample import gen_simple_mfgp_as_mfof
import synthetic_functions
from utils.option_handler import load_options
from utils.reporters import get_reporter
# Whether evaluations are corrupted with observation noise.
#NOISY = True
NOISY = False
# Debug or not
#IS_DEBUG = True
IS_DEBUG = False
# Choose experiment (uncomment exactly one EXP_NAME).
# EXP_NAME = 'GP'
# EXP_NAME = 'GP-Bad-Approx'
#EXP_NAME = 'Hartmann3'
# EXP_NAME = 'Hartmann3b'
# EXP_NAME = 'Hartmann3c'
#EXP_NAME = 'Hartmann6'
# EXP_NAME = 'Hartmann6b'
#EXP_NAME = 'CurrinExp'
EXP_NAME = 'Branin'
# EXP_NAME = 'Borehole'
# Set parameters
# Methods that optimise over a continuous fidelity space.
# NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei', 'direct']
NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei']
# NONFINITE_METHODS = ['gp_ucb']
# NONFINITE_METHODS = ['gp_ucb', 'direct']
# Methods that use the finite fidelity set attached in get_problem_parameters.
FINITE_METHODS = ['mf_gp_ucb_finite', 'mf_sko']
# FINITE_METHODS = ['mf_gp_ucb_finite']
# FINITE_METHODS = []
# Number of independent repetitions and where results are written.
NUM_EXPERIMENTS = 10
SAVE_RESULTS_DIR = './examples/results'
def get_problem_parameters(options):
  """ Returns the problem parameters.

  Builds the multi-fidelity function (mfof), budget and noise variance for
  the benchmark named by EXP_NAME, attaches finite-fidelity settings to
  `options`, and wraps the function with noise when NOISY is set.
  NOTE(review): an unknown EXP_NAME falls through every branch and raises a
  NameError on max_capital below -- confirm EXP_NAME is one of the names above.
  """
  prob = Namespace()
  if EXP_NAME == 'GP':
    mfof = gen_simple_mfgp_as_mfof(fidel_bw=1)
    mfof.init_mfgp = mfof.mfgp
    max_capital = 20 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'GP-Bad-Approx':
    mfof = gen_simple_mfgp_as_mfof(fidel_bw=0.01)
    mfof.init_mfgp = mfof.mfgp
    max_capital = 20 * mfof.opt_fidel_cost
    noise_var = 0.01
  elif EXP_NAME == 'Hartmann3':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.01
  elif EXP_NAME == 'Hartmann3b':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(2, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann3c':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(4, 3)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann6':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 6)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Hartmann6b':
    mfof = synthetic_functions.get_mf_hartmann_as_mfof(2, 6)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'CurrinExp':
    mfof = synthetic_functions.get_mf_currin_exp_as_mfof()
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.5
  elif EXP_NAME == 'Branin':
    mfof = synthetic_functions.get_mf_branin_as_mfof(1)
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 0.05
  elif EXP_NAME == 'Borehole':
    mfof = synthetic_functions.get_mf_borehole_as_mfof()
    max_capital = 200 * mfof.opt_fidel_cost
    noise_var = 5
  # Add finite fidels (two fixed normalised fidelities for the finite methods).
  options.finite_fidels = np.array([[0.333] * mfof.fidel_dim, [0.667] * mfof.fidel_dim])
  options.finite_fidels_is_normalised = True
  # If NOISY, get noisy version
  if NOISY:
    mfof = get_noisy_mfof_from_mfof(mfof, noise_var)
  # is debug: shrink the budget and number of repetitions.
  if IS_DEBUG:
    max_capital = 20 * mfof.opt_fidel_cost
    num_experiments = 3
    experiment_name = 'debug-%s'%(EXP_NAME)
  else:
    experiment_name = EXP_NAME
    num_experiments = NUM_EXPERIMENTS
  # Return everything in this namespace
  prob = Namespace(mfof=mfof, max_capital=max_capital, noisy=NOISY,
                   num_experiments=num_experiments, experiment_name=experiment_name)
  return prob, options
def main():
  """ Entry point: loads options, builds the problem and runs all methods on it. """
  options = load_options(all_mf_gp_bandit_args)
  prob, options = get_problem_parameters(options)
  # Set other variables
  all_methods = NONFINITE_METHODS + FINITE_METHODS
  method_options = {key: options for key in all_methods}  # same options for every method
  noisy_str = 'noiseless' if not NOISY else 'noisy%0.3f'%(prob.mfof.noise_var)
  # The file prefix encodes experiment name, noise setting and dimensionalities.
  save_file_prefix = '%s-%s-p%d-d%d'%(prob.experiment_name, noisy_str,
                                      prob.mfof.fidel_dim,
                                      prob.mfof.domain_dim)
  reporter = get_reporter('default')
  experimenter = MFOptExperimenter(prob.experiment_name, prob.mfof, prob.max_capital,
                                   all_methods, prob.num_experiments, SAVE_RESULTS_DIR,
                                   save_file_prefix=save_file_prefix,
                                   method_options=method_options,
                                   method_reporter=reporter,
                                   reporter=reporter)
  experimenter.run_experiments()
# Run the experiment suite when executed as a script.
if __name__ == '__main__':
  main()
| 4,770 | 31.455782 | 88 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/experiment_synthetic_hartman6d.py
|
"""
Running experiments for the synthetic functions.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
from argparse import Namespace
import numpy as np
# Local imports
from mf.mf_gp_bandit import all_mf_gp_bandit_args
from mf.mfopt_experimenters import MFOptExperimenter
from mf.mf_func import get_noisy_mfof_from_mfof
from mf.gen_mfgp_sample import gen_simple_mfgp_as_mfof
import synthetic_functions
from utils.option_handler import load_options
from utils.reporters import get_reporter
#NOISY = True
# If True, a noisy wrapper (get_noisy_mfof_from_mfof) is applied to the benchmark.
NOISY = False
# Debug or not
#IS_DEBUG = True
IS_DEBUG = False
# Choose experiment
# EXP_NAME = 'GP'
# EXP_NAME = 'GP-Bad-Approx'
#EXP_NAME = 'Hartmann3'
# EXP_NAME = 'Hartmann3b'
# EXP_NAME = 'Hartmann3c'
EXP_NAME = 'Hartmann6'
# EXP_NAME = 'Hartmann6b'
#EXP_NAME = 'CurrinExp'
# EXP_NAME = 'Branin'
# EXP_NAME = 'Borehole'
# Set parameters
# NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei', 'direct']
# Methods run on the continuous fidelity space; presumably "nonfinite" refers to
# not needing the finite fidelity set below -- confirm against the experimenter.
NONFINITE_METHODS = ['mf_gp_ucb', 'gp_ucb', 'gp_ei']
# NONFINITE_METHODS = ['gp_ucb']
# NONFINITE_METHODS = ['gp_ucb', 'direct']
# Methods using the finite fidelity set installed via options.finite_fidels.
FINITE_METHODS = ['mf_gp_ucb_finite', 'mf_sko']
# FINITE_METHODS = ['mf_gp_ucb_finite']
# FINITE_METHODS = []
# Number of independent repetitions per method (3 when IS_DEBUG).
NUM_EXPERIMENTS = 10
# Directory where result files are written.
SAVE_RESULTS_DIR = './examples/results'
def get_problem_parameters(options):
  """ Builds the multi-fidelity problem for the module-level EXP_NAME and returns
      (prob, options), where prob bundles the function object, capital budget,
      noise flag, repetition count and experiment name. """
  # Each entry: (lazy constructor, capital multiplier, noise variance, GP prior?).
  setups = {
    'GP': (lambda: gen_simple_mfgp_as_mfof(fidel_bw=1), 20, 0.05, True),
    'GP-Bad-Approx': (lambda: gen_simple_mfgp_as_mfof(fidel_bw=0.01), 20, 0.01, True),
    'Hartmann3': (lambda: synthetic_functions.get_mf_hartmann_as_mfof(1, 3),
                  200, 0.01, False),
    'Hartmann3b': (lambda: synthetic_functions.get_mf_hartmann_as_mfof(2, 3),
                   200, 0.05, False),
    'Hartmann3c': (lambda: synthetic_functions.get_mf_hartmann_as_mfof(4, 3),
                   200, 0.05, False),
    'Hartmann6': (lambda: synthetic_functions.get_mf_hartmann_as_mfof(1, 6),
                  200, 0.05, False),
    'Hartmann6b': (lambda: synthetic_functions.get_mf_hartmann_as_mfof(2, 6),
                   200, 0.05, False),
    'CurrinExp': (lambda: synthetic_functions.get_mf_currin_exp_as_mfof(),
                  200, 0.5, False),
    'Branin': (lambda: synthetic_functions.get_mf_branin_as_mfof(3),
               200, 0.05, False),
    'Borehole': (lambda: synthetic_functions.get_mf_borehole_as_mfof(),
                 200, 5, False),
  }
  make_mfof, capital_mult, noise_var, attach_init_gp = setups[EXP_NAME]
  mfof = make_mfof()
  if attach_init_gp:
    # The GP benchmarks expose their generating GP as the initial model.
    mfof.init_mfgp = mfof.mfgp
  max_capital = capital_mult * mfof.opt_fidel_cost
  # Add finite fidels
  options.finite_fidels = np.array([[0.333] * mfof.fidel_dim, [0.667] * mfof.fidel_dim])
  options.finite_fidels_is_normalised = True
  # If NOISY, get noisy version
  if NOISY:
    mfof = get_noisy_mfof_from_mfof(mfof, noise_var)
  # Debug runs use a much smaller capital and fewer repetitions.
  if IS_DEBUG:
    max_capital = 20 * mfof.opt_fidel_cost
    num_experiments = 3
    experiment_name = 'debug-%s'%(EXP_NAME)
  else:
    experiment_name = EXP_NAME
    num_experiments = NUM_EXPERIMENTS
  prob = Namespace(mfof=mfof, max_capital=max_capital, noisy=NOISY,
                   num_experiments=num_experiments, experiment_name=experiment_name)
  return prob, options
def main():
  """ Entry point: load options, construct the problem, and run every method. """
  base_options = load_options(all_mf_gp_bandit_args)
  prob, base_options = get_problem_parameters(base_options)
  # All methods (continuous- and finite-fidelity) share one options namespace.
  all_methods = NONFINITE_METHODS + FINITE_METHODS
  method_options = dict((method, base_options) for method in all_methods)
  if NOISY:
    noisy_str = 'noisy%0.3f'%(prob.mfof.noise_var)
  else:
    noisy_str = 'noiseless'
  # Result files are tagged with the experiment, noise level and dimensions.
  save_file_prefix = '%s-%s-p%d-d%d'%(prob.experiment_name, noisy_str,
                                      prob.mfof.fidel_dim, prob.mfof.domain_dim)
  reporter = get_reporter('default')
  experimenter = MFOptExperimenter(prob.experiment_name, prob.mfof, prob.max_capital,
                                   all_methods, prob.num_experiments, SAVE_RESULTS_DIR,
                                   save_file_prefix=save_file_prefix,
                                   method_options=method_options,
                                   method_reporter=reporter,
                                   reporter=reporter)
  experimenter.run_experiments()
if __name__ == '__main__':
  main()
| 4,771 | 31.462585 | 88 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/experiment_synth_MFHOO.py
|
import numpy as np
import Queue
from mf.mf_func import MFOptFunction
from utils.general_utils import map_to_cube
import sys
from examples.synthetic_functions import *
from mf.mf_func import get_noisy_mfof_from_mfof
from letters.letters_classifier import *
import time
from MFTree.MFHOO import *
import synthetic_functions
# Number of independent repetitions of the full experiment.
NUM_EXP = 5
# Benchmark to run: leave exactly one EXP_NAME uncommented.
#EXP_NAME = 'Branin'
#EXP_NAME = 'CurrinExp'
#EXP_NAME = 'Hartmann3'
#EXP_NAME = 'Hartmann6'
EXP_NAME = 'Borehole'
def run_one_experiment(mfobject, nu, rho, times, sigma, C, t0, filename):
	'''Run MFPOO once for every capital in `times` on `mfobject`.

	mfobject: multi-fidelity noisy function object to optimise
	nu, rho: MFHOO smoothness parameters (nu_max / rho_max for MFPOO)
	times: list of capitals, in multiples of the highest-fidelity query cost
	sigma: noise standard deviation
	C: bias-function parameter
	t0: cost of a single highest-fidelity evaluation (unit cost)
	filename: path where partial results are saved after every budget
	Returns (R, T): arrays of best values found and capital actually spent.
	'''
	R = []
	T = []
	for t in times:
		budget = t * mfobject.opt_fidel_cost
		MP = MFPOO(mfobject=mfobject, nu_max=nu, rho_max=rho, total_budget=budget,
		           sigma=sigma, C=C, mult=0.5, tol=1e-3, Randomize=False,
		           Auto=True, unit_cost=t0)
		MP.run_all_MFHOO()
		X, E = MP.get_point()
		R = R + [E]
		T = T + [MP.cost]
		# print(...) works on both Python 2 and 3 (the original py2 print
		# statement did not).
		print(str(MP.cost) + ' : ' + str(E))
		# Save progressively so partial results survive an interruption.
		# Bug fix: the parameter was misspelt 'filname', so this np.save used to
		# resolve 'filename' to the module-level global instead of the argument.
		np.save(filename, R)
	return np.array(R), np.array(T)
if __name__ == '__main__':
	# Build the chosen noiseless benchmark and its noise level.
	# NOTE(review): max_capital is assigned in most branches but never used below,
	# and is missing for 'Hartmann3' -- harmless, but worth cleaning up.
	if EXP_NAME == 'Hartmann3':
		mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 3)
		noise_var = 0.01
		sigma = np.sqrt(noise_var)
	elif EXP_NAME == 'Hartmann6':
		mfof = synthetic_functions.get_mf_hartmann_as_mfof(1, 6)
		max_capital = 200 * mfof.opt_fidel_cost
		noise_var = 0.05
		sigma = np.sqrt(noise_var)
	elif EXP_NAME == 'CurrinExp':
		mfof = synthetic_functions.get_mf_currin_exp_as_mfof()
		max_capital = 200 * mfof.opt_fidel_cost
		noise_var = 0.5
		sigma = np.sqrt(noise_var)
	elif EXP_NAME == 'Branin':
		mfof = synthetic_functions.get_mf_branin_as_mfof(1)
		max_capital = 200 * mfof.opt_fidel_cost
		noise_var = 0.05
		sigma = np.sqrt(noise_var)
	elif EXP_NAME == 'Borehole':
		mfof = synthetic_functions.get_mf_borehole_as_mfof()
		max_capital = 200 * mfof.opt_fidel_cost
		noise_var = 5
		sigma = np.sqrt(noise_var)
	# Capitals (in units of one highest-fidelity query) at which to record results.
	times = [10,20,50,75,100,150,175, 200]
	# Wrap the benchmark with observation noise of variance noise_var.
	mfobject = get_noisy_mfof_from_mfof(mfof, noise_var)
	nu = 1.0
	rho = 0.95
	C = 0.1
	t0 = mfobject.opt_fidel_cost
	# Timestamp tag so repeated runs do not clobber each other's files.
	NT = str(time.time())
	# NOTE(review): py2-only print statements -- this script requires Python 2.
	print 'Running Experiment 1: '
	filename = 'MFHOO' + EXP_NAME + '_' + NT + '_' + '1.npy'
	R,T = run_one_experiment(mfobject,nu,rho,times,sigma,C,t0,filename)
	result = R
	for i in range(1,NUM_EXP):
		print 'Running Experiment' + str(i+1) + ': '
		filename = 'MFHOO' + EXP_NAME + '_' + NT + '_' + str(i+1) + '.npy'
		R,T = run_one_experiment(mfobject,nu,rho,times,sigma,C,t0,filename)
		result = np.vstack([result,R])
	# Aggregate over repetitions: mean regret (opt_val - mean value) and std.
	mu = np.mean(result,axis = 0)
	std = np.std(result,axis = 0)
	result = mfobject.opt_val - mu
	filename = './examples/results/MFHOO_' + EXP_NAME + '_' + NT + '_' + '.csv'
	dfdic = {}
	dfdic['Capital'] = np.array(times)
	dfdic['Value'] = result
	dfdic['Std'] = std
	# NOTE(review): pd is not imported here directly; presumably it arrives via
	# 'from MFTree.MFHOO import *' (MFHOO.py imports pandas as pd) -- confirm.
	df = pd.DataFrame(dfdic)
	df.to_csv(filename)
| 2,797 | 24.907407 | 163 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/__init__.py
|
"""
Contains some results functions.
-- [email protected]
-- [email protected]
"""
| 95 | 15 | 34 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/examples/synthetic_functions.py
|
"""
A collection of utilities for MF-GP Bandits.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
import numpy as np
# Local imports
from mf.mf_func import MFOptFunction
from utils.general_utils import map_to_cube
# Hartmann Functions ---------------------------------------------------------------------
def hartmann(x, alpha, A, P, max_val=np.inf):
  """ Evaluates the Hartmann family of functions parameterised by (alpha, A, P),
      capping the returned value at max_val. """
  exponents = np.sum(A * np.square(P - x), axis=1)
  value = np.dot(alpha, np.exp(-exponents))
  return min(max_val, value)
def _get_hartmann_data(domain_dim):
""" Returns A and P for the 3D hartmann function. """
# pylint: disable=bad-whitespace
if domain_dim == 3:
A = np.array([[3.0, 10, 30],
[0.1, 10, 35],
[3.0, 10, 30],
[0.1, 10, 35]], dtype=np.float64)
P = 1e-4 * np.array([[3689, 1170, 2673],
[4699, 4387, 7470],
[1091, 8732, 5547],
[ 381, 5743, 8828]], dtype=np.float64)
alpha = np.array([1.0, 1.2, 3.0, 3.2])
domain = [[0, 1]] * 3
opt_pt = np.array([0.114614, 0.555649, 0.852547])
max_val = 3.86278
elif domain_dim == 6:
A = np.array([[ 10, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[ 3, 3.5, 1.7, 10, 17, 8],
[ 17, 8, 0.05, 10, 0.1, 14]], dtype=np.float64)
P = 1e-4 * np.array([[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381]], dtype=np.float64)
alpha = np.array([1.0, 1.2, 3.0, 3.2])
domain = [[0, 1]] * 6
opt_pt = np.array([0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573])
max_val = 3.322368
else:
raise NotImplementedError('Only implemented in 3 and 6 dimensions.')
return A, P, alpha, opt_pt, domain, max_val
def get_mf_hartmann_function(fidel_dim, domain_dim):
  """ Returns (objective f(z, x), opt_pt, opt_fidel, fidel_bounds, domain_bounds)
      for a multi-fidelity Hartmann problem. z is the fidelity and x the point
      in the domain. """
  A, P, alpha, opt_pt, domain_bounds, max_val = _get_hartmann_data(domain_dim)
  # The fidelity perturbs the first fidel_dim entries of alpha by up to 0.1.
  delta = np.array([0.1] * fidel_dim + [0] * (4 - fidel_dim))
  def mf_hart_obj(z, x):
    """ Hartmann objective at fidelity z and point x. """
    assert len(z) == fidel_dim
    padded_z = np.append(z, [0] * (4 - fidel_dim))
    return hartmann(x, alpha - (1 - padded_z) * delta, A, P, max_val)
  # Highest fidelity is the all-ones vector of the unit fidelity cube.
  opt_fidel = np.ones(fidel_dim)
  fidel_bounds = [[0, 1]] * fidel_dim
  return mf_hart_obj, opt_pt, opt_fidel, fidel_bounds, domain_bounds
def get_mf_hartmann_as_mfof(fidel_dim, domain_dim):
  """ Returns the multi-fidelity Hartmann problem wrapped in an
      mf.mf_func.MFOptFunction object. """
  (mf_hart, opt_pt, opt_fidel, fidel_bounds,
   domain_bounds) = get_mf_hartmann_function(fidel_dim, domain_dim)
  cost_fn = _get_mf_cost_function(fidel_bounds, True)
  # The optimum value is the objective at the optimum point/fidelity.
  return MFOptFunction(mf_hart, cost_fn, fidel_bounds, domain_bounds, opt_fidel,
                       vectorised=False, opt_pt=opt_pt,
                       opt_val=mf_hart(opt_fidel, opt_pt))
# Hartmann Functions end here ------------------------------------------------------------
# Currin Exponential Function ------------------------------------------------------------
def currin_exp(x, alpha):
  """ Computes the Currin exponential function at x = (x1, x2) with damping
      parameter alpha. """
  x1, x2 = x[0], x[1]
  damp = 1 - alpha * np.exp(-1/(2 * x2))
  numer = 2300*x1**3 + 1900*x1**2 + 2092*x1 + 60
  denom = 100*x1**3 + 500*x1**2 + 4*x1 + 20
  return damp * (numer / denom)
def get_mf_currin_exp_function():
  """ Returns the multi-fidelity Currin exponential problem (d=2, p=1) as the
      tuple (objective, opt_pt, opt_val, opt_fidel, fidel_bounds, domain_bounds).
      opt_pt is unknown and returned as None. """
  opt_val = 13.7986850
  def mf_currin_exp_obj(z, x):
    """ Currin objective at fidelity z and point x; capped at opt_val. """
    return min(opt_val, currin_exp(x, 1 - 0.1 * z))
  fidel_bounds = np.array([[0, 1]])
  domain_bounds = np.array([[0, 1], [0, 1]])
  return (mf_currin_exp_obj, None, opt_val, np.array([1]), fidel_bounds,
          domain_bounds)
def get_mf_currin_exp_as_mfof():
  """ Returns the multi-fidelity Currin exponential problem wrapped in an
      mf.mf_func.MFOptFunction object. """
  (mf_currin_exp_obj, opt_pt, opt_val, opt_fidel, fidel_bounds,
   domain_bounds) = get_mf_currin_exp_function()
  def fidel_cost_function(z):
    """ Query cost at fidelity z. """
    return 0.1 + z**2
  return MFOptFunction(mf_currin_exp_obj, fidel_cost_function, fidel_bounds,
                       domain_bounds, opt_fidel, vectorised=False,
                       opt_pt=opt_pt, opt_val=opt_val)
# Currin Exponential Function ends here --------------------------------------------------
# Branin Function ------------------------------------------------------------------------
def branin_function(x, a, b, c, r, s, t):
  """ Computes the negated Branin function (negated so the problem is a
      maximisation). """
  x1, x2 = x[0], x[1]
  branin_val = a * (x2 - b*x1**2 + c*x1 - r)**2 + s*(1 - t)*np.cos(x1) + s
  return -branin_val
def branin_function_alpha(x, alpha, a, r, s):
  """ Branin function with the (b, c, t) parameters packed into alpha. """
  b, c, t = alpha[0], alpha[1], alpha[2]
  return branin_function(x, a, b, c, r, s, t)
def get_mf_branin_function(fidel_dim):
  """ Returns (objective f(z, x), opt_pt, opt_fidel, fidel_bounds, domain_bounds)
      for a multi-fidelity Branin problem with fidel_dim fidelity dimensions. """
  a0 = 1
  r0 = 6
  s0 = 10
  # (b, c, t) of the standard Branin function.
  alpha = np.array([5.1/(4*np.pi**2), 5/np.pi, 1/(8*np.pi)])
  # How much each fidelity coordinate perturbs (b, c, t) at low fidelity.
  delta = np.array([0.01, 0.1, -0.005][0:fidel_dim] + [0] * (3 - fidel_dim))
  def mf_branin_obj(z, x):
    """ Branin objective at fidelity z and point x. """
    assert len(z) == fidel_dim
    padded_z = np.append(z, [0] * (3 - fidel_dim))
    return branin_function_alpha(x, alpha - (1 - padded_z) * delta, a0, r0, s0)
  # Other data
  opt_fidel = np.ones((fidel_dim))
  fidel_bounds = [[0, 1]] * fidel_dim
  opt_pt = np.array([np.pi, 2.275])
  domain_bounds = [[-5, 10], [0, 15]]
  return mf_branin_obj, opt_pt, opt_fidel, fidel_bounds, domain_bounds
def get_mf_branin_as_mfof(fidel_dim):
  """ Returns the multi-fidelity Branin problem wrapped in an
      mf.mf_func.MFOptFunction object. """
  (mf_branin_obj, opt_pt, opt_fidel, fidel_bounds,
   domain_bounds) = get_mf_branin_function(fidel_dim)
  cost_fn = _get_mf_cost_function(fidel_bounds, True)
  return MFOptFunction(mf_branin_obj, cost_fn, fidel_bounds, domain_bounds,
                       opt_fidel, vectorised=False, opt_pt=opt_pt,
                       opt_val=mf_branin_obj(opt_fidel, opt_pt))
# Branin Function ends here --------------------------------------------------------------
# Borehole Function ----------------------------------------------------------------------
def borehole_function(x, z, max_val):
  """ Computes the Borehole function: a convex combination, weighted by the
      fidelity z in [0, 1], of a high-fidelity value (capped at max_val) and a
      cheap low-fidelity approximation. """
  # pylint: disable=bad-whitespace
  rw, r, Tu, Hu, Tl, Hl, L, Kw = (x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7])
  log_r_rw = np.log(r/rw)
  frac2 = 2*L*Tu/(log_r_rw * rw**2 * Kw)
  # High fidelity value, capped at max_val.
  f2 = min(max_val, 2 * np.pi * Tu * (Hu - Hl)/(log_r_rw * (1 + frac2 + Tu/Tl)))
  # Low fidelity approximation.
  f1 = 5 * Tu * (Hu - Hl)/(log_r_rw * (1.5 + frac2 + Tu/Tl))
  # Blend by fidelity: z=1 is purely high fidelity, z=0 purely low.
  return f2*z + f1*(1-z)
def get_mf_borehole_function():
  """ Returns the multi-fidelity Borehole problem as the tuple (objective,
      opt_pt, opt_val, opt_fidel, fidel_bounds, domain_bounds). opt_pt is
      unknown and returned as None. """
  opt_val = 309.523221
  mf_borehole_function = lambda z, x: borehole_function(x, z, opt_val)
  # Physical ranges of (rw, r, Tu, Hu, Tl, Hl, L, Kw).
  domain_bounds = [[0.05, 0.15],
                   [100, 50000],
                   [63070, 115600],
                   [990, 1110],
                   [63.1, 116],
                   [700, 820],
                   [1120, 1680],
                   [9855, 12045]]
  return (mf_borehole_function, None, opt_val, np.array([1]), [[0, 1]],
          domain_bounds)
def get_mf_borehole_as_mfof():
  """ Returns the multi-fidelity Borehole problem wrapped in an
      mf.mf_func.MFOptFunction object. """
  (mf_borehole_function, opt_pt, opt_val, opt_fidel, fidel_bounds,
   domain_bounds) = get_mf_borehole_function()
  def fidel_cost_function(z):
    """ Query cost at fidelity z. """
    return 0.1 + z**1.5
  return MFOptFunction(mf_borehole_function, fidel_cost_function, fidel_bounds,
                       domain_bounds, opt_fidel, vectorised=False,
                       opt_pt=opt_pt, opt_val=opt_val)
# Borehole Function ends here ------------------------------------------------------------
def _get_mf_cost_function(fidel_bounds, is_0_1):
""" Returns the cost function. fidel_bounds are the bounds for the fidelity space
and is_0_1 should be true if fidel_bounds is [0,1]^p. """
fidel_dim = len(fidel_bounds)
if fidel_dim == 1:
fidel_powers = [2]
elif fidel_dim == 2:
fidel_powers = [3, 2]
elif fidel_dim == 3:
fidel_powers = [3, 2, 1.5]
else:
fidel_powers = [3] + list(np.linspace(2, 1.2, fidel_dim-1))
# Define the normalised
def _norm_cost_function(norm_z):
""" The cost function with normalised coordinates. """
min_cost = 0.05
return min_cost + (1-min_cost) * np.power(norm_z, fidel_powers).sum()
# Now return based on whether or not is_0_1
ret = (_norm_cost_function if is_0_1 else
lambda z: _norm_cost_function(map_to_cube(z, fidel_bounds)))
return ret
| 9,602 | 38.846473 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/MFTreeSearchCV/MFHOO.py
|
#Author: Rajat Sen
from __future__ import print_function
from __future__ import division
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
from mf.mf_func import MFOptFunction # MF function object
from utils.general_utils import map_to_cube # mapping everything to [0,1]^d cube
import sys
#from examples.synthetic_functions import *
from multiprocessing import Process
#import brewer2mpl
import pandas as pd
import random
import sys
import time
import datetime
nu_mult = 1.0 # multiplier to the nu parameter
def flip(p):
	'''Bernoulli(p) coin flip: returns True with probability p.'''
	return random.random() < p
class MF_node(object):
	def __init__(self,cell,value,fidel,upp_bound,height,dimension,num):
		'''This is a node of the MFTREE
		cell: tuple of (lo, hi) pairs denoting the bounding box of the partition
		m_value: mean value of the observations in the cell and its children
		value: value in the cell
		fidelity: the last fidelity that the cell was queried with
		upp_bound: B_{i,t} in the paper
		t_bound: upper bound with the t dependent term
		height: height of the cell (sometimes can be referred to as depth in the tree)
		dimension: the dimension of the parent that was halved in order to obtain this cell
		num: number of queries inside this partition so far
		left,right,parent: pointers to left, right and parent
		'''
		self.cell = cell
		self.m_value = value
		self.value = value
		self.fidelity = fidel
		self.upp_bound = upp_bound
		self.height = height
		self.dimension = dimension
		self.num = num
		# t_bound starts at upp_bound; MF_tree.update_tbounds refreshes it.
		self.t_bound = upp_bound
		self.left = None
		self.right = None
		self.parent = None

	def __cmp__(self,other):
		# Python 2 ordering: nodes with larger t_bound sort first.
		return cmp(other.t_bound,self.t_bound)

	def __lt__(self, other):
		# Bug fix: __cmp__ (and the cmp builtin) do not exist on Python 3, so any
		# ordering comparison of MF_node objects raised there. __lt__ reproduces
		# the same descending-by-t_bound order on both interpreter versions.
		return self.t_bound > other.t_bound
def in_cell(node,parent):
	'''
	Return True iff the cell of 'node' lies entirely inside the cell of 'parent'.
	node may be an MF_node or a plain tuple of (lo, hi) intervals.
	'''
	try:
		ncell = list(node.cell)
	except:
		# node is already a bare cell tuple.
		ncell = list(node)
	pcell = list(parent.cell)
	for i in range(len(ncell)):
		inside_lo = ncell[i][0] >= pcell[i][0]
		inside_hi = ncell[i][1] <= pcell[i][1]
		if not (inside_lo and inside_hi):
			return False
	return True
class MF_tree(object):
	'''
	MF_tree class that maintains the multi-fidelity tree of cells.
	nu: nu parameter in the paper
	rho: rho parameter in the paper
	sigma: noise standard deviation parameter, usually a hyperparameter for the whole process
	C: parameter for the bias function as defined in the paper
	root: optional MF_node instance to initialise the tree with
	'''
	def __init__(self,nu,rho,sigma,C,root = None):
		self.nu = nu
		self.rho = rho
		self.sigma = sigma
		self.root = root
		self.C = C
		self.root = root
		# mheight: maximum height (depth) seen so far in the tree.
		self.mheight = 0
		# maxi / current_best track the best lower-confidence value and its cell centre.
		self.maxi = float(-sys.maxsize - 1)
		self.current_best = root
	def insert_node(self,root,node):
		'''
		Insert 'node' in the subtree rooted at 'root' in the appropriate position
		(recursing into whichever child cell contains it) and return the inserted node.
		'''
		if self.root is None:
			node.height = 0
			if self.mheight < node.height:
				self.mheight = node.height
			self.root = node
			self.root.parent = None
			return self.root
		if root is None:
			node.height = 0
			if self.mheight < node.height:
				self.mheight = node.height
			root = node
			root.parent = None
			return root
		if root.left is None and root.right is None:
			# Leaf: attach as left child first.
			node.height = root.height + 1
			if self.mheight < node.height:
				self.mheight = node.height
			root.left = node
			root.left.parent = root
			return root.left
		elif root.left is not None:
			if in_cell(node,root.left):
				return self.insert_node(root.left,node)
			elif root.right is not None:
				if in_cell(node,root.right):
					return self.insert_node(root.right,node)
			else:
				# Left exists but does not contain the node: attach as right child.
				node.height = root.height + 1
				if self.mheight < node.height:
					self.mheight = node.height
				root.right = node
				root.right.parent = root
				return root.right
	def update_parents(self,node,val):
		'''
		update the upperbound and mean value of a parent node, once a new child is inserted in its child tree. This process proceeds recursively up the tree
		'''
		if node.parent is None:
			return
		else:
			parent = node.parent
			# Running mean over all observations in the parent's subtree.
			parent.m_value = (parent.num*parent.m_value + val)/(1.0 + parent.num)
			parent.num = parent.num + 1.0
			# B_{i,t} = mean + 2 * nu * rho^height (smoothness-based bound).
			parent.upp_bound = parent.m_value + 2*((self.rho)**(parent.height))*self.nu
			self.update_parents(parent,val)
	def update_tbounds(self,root,t):
		'''
		updating the tbounds of every node recursively (post-order traversal)
		'''
		if root is None:
			return
		self.update_tbounds(root.left,t)
		self.update_tbounds(root.right,t)
		# Add the time-dependent confidence width to the upper bound.
		root.t_bound = root.upp_bound + np.sqrt(2*(self.sigma**2)*np.log(t)/root.num)
		maxi = None
		if root.left:
			maxi = root.left.t_bound
		if root.right:
			if maxi:
				if maxi < root.right.t_bound:
					maxi = root.right.t_bound
			else:
				maxi = root.right.t_bound
		if maxi:
			# A parent's bound cannot exceed the best bound among its children.
			root.t_bound = min(root.t_bound,maxi)
	def print_given_height(self,root,height):
		# Print every node at the given height (debugging aid).
		if root is None:
			return
		if root.height == height:
			print (root.cell, root.num,root.upp_bound,root.t_bound),
		elif root.height < height:
			if root.left:
				self.print_given_height(root.left,height)
			if root.right:
				self.print_given_height(root.right,height)
		else:
			return
	def levelorder_print(self):
		'''
		levelorder print of the whole tree (debugging aid)
		'''
		for i in range(self.mheight + 1):
			self.print_given_height(self.root,i)
			print('\n')
	def search_cell(self,root,cell):
		'''
		Check if 'cell' is present in the subtree rooted at 'root'.
		Returns (found, node, parent).
		'''
		if root is None:
			return False,None,None
		if root.left is None and root.right is None:
			if root.cell == cell:
				return True,root,root.parent
			else:
				return False,None,root
		if root.left:
			if in_cell(cell,root.left):
				return self.search_cell(root.left,cell)
		if root.right:
			if in_cell(cell,root.right):
				return self.search_cell(root.right,cell)
	def get_next_node(self,root):
		'''
		getting the next node to be queried or broken, see the algorithm in the paper:
		descend towards the child with the larger t_bound, flipping a fair coin on ties
		'''
		if root is None:
			print('Could not find next node. Check Tree.')
		if root.left is None and root.right is None:
			return root
		if root.left is None:
			return self.get_next_node(root.right)
		if root.right is None:
			return self.get_next_node(root.left)
		if root.left.t_bound > root.right.t_bound:
			return self.get_next_node(root.left)
		elif root.left.t_bound < root.right.t_bound:
			return self.get_next_node(root.right)
		else:
			bit = flip(0.5)
			if bit:
				return self.get_next_node(root.left)
			else:
				return self.get_next_node(root.right)
	def get_current_best(self,root):
		'''
		Scan all leaves and record in self.current_best the centre of the leaf cell
		with the largest pessimistic value m_value - nu * rho^height.
		'''
		if root is None:
			return
		if root.right is None and root.left is None:
			val = root.m_value - self.nu*((self.rho)**(root.height))
			if self.maxi < val:
				self.maxi = val
				cell = list(root.cell)
				self.current_best =np.array([(s[0]+s[1])/2.0 for s in cell])
			return
		if root.left:
			self.get_current_best(root.left)
		if root.right:
			self.get_current_best(root.right)
class MFHOO(object):
	'''
	MFHOO algorithm, given a fixed nu and rho
	mfobject: multi-fidelity noisy function object
	nu: nu parameter
	rho: rho parameter
	budget: total budget provided either in units or time in seconds
	sigma: noise parameter
	C: bias function parameter
	tol: default parameter to decide whether a new fidelity query is required for a cell
	Randomize: True implies that the leaf is split on a randomly chosen dimension, False means the scheme in DIRECT algorithm is used. We recommend using False.
	Auto: Select C automatically, which is recommended for real data experiments
	CAPITAL: 'Time' mean time in seconds is used as cost unit, while 'Actual' means unit cost used in synthetic experiments
	debug: If true then more messages are printed
	'''
	def __init__(self,mfobject, nu, rho, budget, sigma, C, tol = 1e-3,\
	Randomize = False, Auto = False,value_dict = {},\
	CAPITAL = 'Time', debug = 'True'):
		self.mfobject = mfobject
		self.nu = nu
		self.rho = rho
		self.budget = budget
		self.C = C
		# t: number of queries made so far.
		self.t = 0
		self.sigma = sigma
		self.tol = tol
		self.Randomize = Randomize
		# cost: capital spent so far (time or units, per CAPITAL).
		self.cost = 0
		# cflag is set when an observation violates the assumed bias bound C.
		self.cflag = False
		# value_dict caches MF_node observations keyed by cell tuple
		# (shared across MFHOO instances by MFPOO).
		self.value_dict = value_dict
		self.CAPITAL = CAPITAL
		self.debug = debug
		if Auto:
			# Estimate C from two probes of the centre point at fidelities 0.8, 0.2.
			z1 = 0.8
			z2 = 0.2
			d = self.mfobject.domain_dim
			x = np.array([0.5]*d)
			t1 = time.time()
			v1 = self.mfobject.eval_at_fidel_single_point_normalised([z1], x)
			v2 = self.mfobject.eval_at_fidel_single_point_normalised([z2], x)
			t2 = time.time()
			self.C = np.sqrt(2)*np.abs(v1-v2)/np.abs(z1-z2)
			self.nu = nu_mult*self.C
			if self.debug:
				print('Auto Init: ')
				print('C: ' + str(self.C))
				print('nu: ' + str(self.nu))
			c1 = self.mfobject.eval_fidel_cost_single_point_normalised([z1])
			c2 = self.mfobject.eval_fidel_cost_single_point_normalised([z2])
			self.cost = c1 + c2
			if self.CAPITAL == 'Time':
				self.cost = t2 - t1
		# Initialise the tree with the whole unit cube as the root cell.
		d = self.mfobject.domain_dim
		cell = tuple([(0,1)]*d)
		height = 0
		dimension = 0
		root,cost= self.querie(cell,height, self.rho, self.nu, dimension, option = 1)
		self.t = self.t + 1
		self.Tree = MF_tree(nu,rho,self.sigma,C,root)
		self.Tree.update_tbounds(self.Tree.root,self.t)
		self.cost = self.cost + cost
	def get_value(self,cell,fidel):
		'''cell: tuple of (lo, hi) pairs; evaluates the function at the cell centre
		at the given fidelity.'''
		x = np.array([(s[0]+s[1])/2.0 for s in list(cell)])
		return self.mfobject.eval_at_fidel_single_point_normalised([fidel], x)
	def querie(self,cell,height, rho, nu,dimension,option = 1):
		'''Query 'cell', choosing the fidelity from the cell diameter, reusing the
		cached value when its fidelity is close enough. Returns (MF_node, cost).'''
		diam = nu*(rho**height)
		if option == 1:
			# Smaller cells are queried at higher fidelity (clipped to [tol, 1]).
			z = min(max(1 - diam/self.C,self.tol),1.0)
		else:
			z = 1.0
		if cell in self.value_dict:
			current = self.value_dict[cell]
			if abs(current.fidelity - z) <= self.tol:
				# Cached fidelity is close enough; no new query needed.
				value = current.value
				cost = 0
			else:
				t1 = time.time()
				value = self.get_value(cell,z)
				t2 = time.time()
				if abs(value - current.value) > self.C*abs(current.fidelity - z):
					# Observed change exceeds the assumed bias bound; flag so the
					# caller (MFPOO) can inflate C.
					self.cflag = True
				current.value = value
				current.m_value = value
				current.fidelity = z
				self.value_dict[cell] = current
				if self.CAPITAL == 'Time':
					cost = t2 - t1
				else:
					cost = self.mfobject.eval_fidel_cost_single_point_normalised([z])
		else:
			t1 = time.time()
			value = self.get_value(cell,z)
			t2 = time.time()
			bhi = 2*diam + value
			self.value_dict[cell] = MF_node(cell,value,z,bhi,height,dimension,1)
			if self.CAPITAL == 'Time':
				cost = t2 - t1
			else:
				cost = self.mfobject.eval_fidel_cost_single_point_normalised([z])
		bhi = 2*diam + value
		current_object = MF_node(cell,value,z,bhi,height,dimension,1)
		return current_object,cost
	def split_children(self,current,rho,nu,option = 1):
		'''Split the cell of 'current' into two halves along its widest dimension
		(or a random one if Randomize), query both halves and return (children, cost).'''
		pcell = list(current.cell)
		span = [abs(pcell[i][1] - pcell[i][0]) for i in range(len(pcell))]
		if self.Randomize:
			dimension = np.random.choice(range(len(pcell)))
		else:
			# DIRECT-style: split along the dimension with the largest span.
			dimension = np.argmax(span)
		dd = len(pcell)
		if dimension == current.dimension:
			# Avoid splitting the same dimension twice in a row.
			dimension = (current.dimension - 1)%dd
		cost = 0
		h = current.height + 1
		l = np.linspace(pcell[dimension][0],pcell[dimension][1],3)
		children = []
		for i in range(len(l)-1):
			cell = []
			for j in range(len(pcell)):
				if j != dimension:
					cell = cell + [pcell[j]]
				else:
					cell = cell + [(l[i],l[i+1])]
			cell = tuple(cell)
			child,c = self.querie(cell, h, rho, nu,dimension,option)
			children = children + [child]
			cost = cost + c
		return children, cost
	def take_HOO_step(self):
		'''One HOO iteration: pick the leaf with the best t_bound, split it, insert
		both children and refresh the bounds.'''
		current = self.Tree.get_next_node(self.Tree.root)
		children,cost = self.split_children(current,self.rho,self.nu,1)
		self.t = self.t + 2
		self.cost = self.cost + cost
		rnode = self.Tree.insert_node(self.Tree.root,children[0])
		self.Tree.update_parents(rnode,rnode.value)
		rnode = self.Tree.insert_node(self.Tree.root,children[1])
		self.Tree.update_parents(rnode,rnode.value)
		self.Tree.update_tbounds(self.Tree.root,self.t)
	def run(self):
		'''Keep taking HOO steps until the budget is exhausted.'''
		while self.cost <= self.budget:
			self.take_HOO_step()
	def get_point(self):
		'''Return the centre of the current best cell (normalised coordinates).'''
		self.Tree.get_current_best(self.Tree.root)
		return self.Tree.current_best
class MFPOO(object):
	'''
	MFPOO object that spawns multiple MFHOO instances with different rho values
	(POO-style), shares their observation cache, and returns the best point found.
	'''
	def __init__(self,mfobject, nu_max, rho_max, total_budget, sigma, C, mult, tol = 1e-3, Randomize = False, Auto = False,unit_cost = 1.0, CAPITAL = 'Time', debug = 'True'):
		self.mfobject = mfobject
		self.nu_max = nu_max
		self.rho_max = rho_max
		self.total_budget = total_budget
		self.C = C
		self.t = 0
		self.sigma = sigma
		self.tol = tol
		self.Randomize = Randomize
		self.cost = 0
		# Observation cache shared across all spawned MFHOO instances.
		self.value_dict = {}
		self.MH_arr = []
		self.CAPITAL = CAPITAL
		self.debug = debug
		if Auto:
			# Estimate C (and optionally the unit cost) from two probe queries.
			if unit_cost is None:
				z1 = 1.0
				if self.debug:
					print('Setting unit cost automatically as None was supplied')
			else:
				z1 = 0.8
			z2 = 0.2
			d = self.mfobject.domain_dim
			x = np.array([0.5]*d)
			t1 = time.time()
			v1 = self.mfobject.eval_at_fidel_single_point_normalised([z1], x)
			t3 = time.time()
			v2 = self.mfobject.eval_at_fidel_single_point_normalised([z2], x)
			t2 = time.time()
			self.C = np.sqrt(2)*np.abs(v1-v2)/np.abs(z1-z2)
			self.nu_max = nu_mult*self.C
			if unit_cost is None:
				# Wall-clock time of one full-fidelity query becomes the unit cost.
				unit_cost = t3 - t1
				if self.debug:
					print('Unit Cost: ',unit_cost)
			if self.debug:
				print('Auto Init: ')
				print('C: ' + str(self.C))
				print('nu: ' + str(self.nu_max))
			c1 = self.mfobject.eval_fidel_cost_single_point_normalised([z1])
			c2 = self.mfobject.eval_fidel_cost_single_point_normalised([z2])
			# The probe queries are charged against the total budget.
			self.total_budget = self.total_budget - c1 - c2
			if self.CAPITAL == 'Time':
				self.total_budget = self.total_budget - (t2 - t1)
			if self.debug:
				print('Budget Remaining: ' + str(self.total_budget))
		if self.CAPITAL == 'Time':
			self.unit_cost = unit_cost
		else:
			self.unit_cost = self.mfobject.eval_fidel_cost_single_point_normalised([1.0])
		# Number of MFHOO instances per the POO schedule (clipped to [1, n/2+1]).
		n = max(self.total_budget/self.unit_cost,1)
		Dm = int(np.log(2.0)/np.log(1/self.rho_max))
		nHOO = int(mult*Dm*np.log(n/np.log(n+1)))
		self.nHOO = max(1,int(min(max(1,nHOO),n/2+1)))
		# Per-instance budget, reserving one final full-fidelity query each.
		self.budget = (self.total_budget - self.nHOO*self.unit_cost)/float(self.nHOO)
		if self.debug:
			print('Number of MFHOO Instances: ' + str(self.nHOO))
			print('Budget per MFHOO Instance:' + str(self.budget))
	def run_all_MFHOO(self):
		'''Run every MFHOO instance in sequence, sharing the value cache and
		inflating C whenever an instance flags a bias-bound violation.'''
		nu = self.nu_max
		for i in range(self.nHOO):
			# POO schedule: rho_i = rho_max^(nHOO / (nHOO - i)).
			rho = self.rho_max**(float(self.nHOO)/(self.nHOO-i))
			MH = MFHOO(mfobject = self.mfobject, nu=nu, rho=rho, budget=self.budget, sigma=self.sigma, C=self.C, tol = 1e-3, Randomize = False, Auto = False,value_dict = self.value_dict, CAPITAL = self.CAPITAL, debug = self.debug)
			print('Running SOO number: ' + str(i+1) + ' rho: ' + str(rho) + ' nu: ' + str(nu))
			MH.run()
			print('Done!')
			self.cost = self.cost + MH.cost
			if MH.cflag:
				# The bias bound was violated: inflate C (and nu) for later instances.
				self.C = 1.4*self.C
				nu = nu_mult*self.C
				self.nu_max = nu_mult*self.C
				if self.debug:
					print('Updating C')
					print('C: ' + str(self.C))
					print('nu_max: ' + str(nu))
			self.value_dict = MH.value_dict
			self.MH_arr = self.MH_arr + [MH]
	def get_point(self):
		'''Evaluate each instance's best point at full fidelity and return
		(points in unnormalised coordinates, their evaluations).'''
		points = [H.get_point() for H in self.MH_arr]
		for H in self.MH_arr:
			self.t = self.t + H.t
		evals = [self.mfobject.eval_at_fidel_single_point_normalised([1.0],x) for x in points]
		# Charge the final full-fidelity evaluations to the running cost.
		if self.CAPITAL == 'Actual':
			self.cost = self.cost + self.nHOO*self.mfobject.eval_fidel_cost_single_point_normalised([1.0])
		else:
			self.cost = self.cost + self.nHOO*self.unit_cost
		index = np.argmax(evals)
		newp = []
		for p in points:
			_,npoint = self.mfobject.get_unnormalised_coords(None,p)
			newp = newp + [npoint]
		return newp,evals
| 15,512 | 26.168126 | 221 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/MFTreeSearchCV/MFTreeFunction.py
|
#Author: Rajat Sen
# general MF function object for doing tree search on scikit-learn classifier/regressor object
from __future__ import print_function
from __future__ import division
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
from sklearn.metrics import *
from MFTreeSearchCV.converters import *
from sklearn.model_selection import cross_val_score
from copy import deepcopy
from scipy.stats import norm
from scipy import integrate
# Local imports
from mf.mf_func import MFOptFunction
import warnings
from sklearn.model_selection import cross_val_score
import pandas as pd
def return_scoring_function(tag):
	'''
	Maps a scoring tag such as 'accuracy' to the corresponding function from
	sklearn.metrics (e.g. 'accuracy' -> accuracy_score).
	Raises ValueError for an unrecognised tag.
	'''
	# Lambdas keep name resolution lazy, so an unknown tag is rejected without
	# ever touching the metric names.
	table = {
		'accuracy': lambda: accuracy_score,
		'balanced_accuracy': lambda: balanced_accuracy_score,
		'average_precision': lambda: average_precision_score,
		'brier_score_loss': lambda: brier_score_loss,
		'f1': lambda: f1_score,
		'neg_log_loss': lambda: log_loss,
		'precision': lambda: precision_score,
		'recall': lambda: recall_score,
		'roc_auc': lambda: roc_auc_score,
		'explained_variance': lambda: explained_variance_score,
		'neg_mean_absolute_error': lambda: mean_absolute_error,
		'neg_mean_squared_error': lambda: mean_squared_error,
		'neg_mean_squared_log_error': lambda: mean_squared_log_error,
		'neg_median_absolute_error': lambda: median_absolute_error,
		'r2': lambda: r2_score,
	}
	if tag not in table:
		raise ValueError('Unrecognized scorer tag!')
	return table[tag]()
def merge_two_dicts(x, y):
	'''
	Returns a new dictionary with the keys of both x and y; on a key collision
	the value from y wins. Neither input dictionary is modified.
	'''
	merged = dict(x)
	merged.update(y)
	return merged
class MFTreeFunction(MFOptFunction):
    '''
    A multi-fidelity function class which can be queried at 'x' at different
    fidelity levels 'z in [0,1]'.
    ----------
    X: training data features
    y: training label features
    estimator : estimator object.
        This is assumed to implement the scikit-learn estimator interface.
        Unlike grid search CV, estimator need not provide a ``score`` function.
        Therefore ``scoring`` must be passed.
    param_dict : Dictionary with parameter names (string) as keys where each value is
        another dictionary. The value dictionary has the keys 'range' that specifies the
        range of the hyper-parameter, 'type': 'int' or 'cat' or 'real' (integer,
        categorical or real), and 'scale': 'linear' or 'log' specifying whether the
        search is done on a linear scale or a logarithmic scale. An example param_dict
        for scikit-learn SVC is as follows:
        eg: param_dict = {'C' : {'range': [1e-2,1e2], 'type': 'real', 'scale': 'log'}, \
        'kernel' : {'range': [ 'linear', 'poly', 'rbf', 'sigmoid'], 'type': 'cat'}, \
        'degree' : {'range': [3,10], 'type': 'int', 'scale': 'linear'}}
    scoring : string, callable, list/tuple, dict or None, default: None
        A single string (see :ref:`scoring_parameter`). This must be specified as a
        string. See scikit-learn metrics for more details.
    fixed_params: dictionary of parameter values other than the ones in param_dict,
        that should be held fixed at the supplied value. For example, if
        fixed_params = {'nthread': 10} is passed with estimator as XGBoost, it means
        that all XGBoost instances will be run with 10 parallel threads.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
    fidelity_range : range of fidelity to use. It is a tuple (a,b) which means the
        lowest fidelity uses a samples for training and validation and b samples are
        used when fidelity is the highest. We recommend setting b to the total number
        of training samples available and a to a reasonable value.
    n_jobs : number of parallel runs for the CV. Note that njobs * (number of threads
        used in the estimator) must be less than the number of threads allowed in
        your machine. Default value is 1.
    Attributes and functions
    ----------
    _mf_func : returns the value of the function at point 'x' evaluated at fidelity 'z'
    For other methods see the specifications in mf/mf_func.
    '''
    def __init__(self, X,y,estimator, param_dict,fidelity_range, \
            scoring='accuracy', greater_is_better = True, fixed_params = {},\
            n_jobs=1, cv = 3):
        # NOTE(review): fixed_params has a shared mutable default ({}); it is
        # only read here, but confirm no caller mutates the stored reference.
        self.base_estimator = estimator
        self.param_dict = param_dict
        self.scoring = scoring
        self.fixed_params = fixed_params
        self.n_jobs = n_jobs
        self.fidelity_range = fidelity_range
        self.cv = cv
        # NOTE(review): duplicate of the assignment two lines above.
        self.fidelity_range = fidelity_range
        self.X = X
        self.y = y
        self.greater_is_better = greater_is_better
        # Resolve the scoring tag to the actual sklearn metric function.
        self.scorer = return_scoring_function(self.scoring)
        # Continuous search-space bounds plus the parameter-name ordering.
        self.problem_bounds, self.keys = convert_dict_to_bounds(self.param_dict)
        # Highest fidelity == the largest number of training samples allowed.
        self.max_data = self.fidelity_range[1]
        mf_func = self._mf_func
        fidel_cost_func = self._fidel_cost
        fidel_bounds = np.array([self.fidelity_range])
        domain_bounds = np.array(self.problem_bounds)
        opt_fidel_unnormalised = np.array([self.max_data])
        # Register this object as a (non-vectorised) multi-fidelity function.
        super(MFTreeFunction, self).__init__(mf_func, fidel_cost_func, fidel_bounds,
                                             domain_bounds, opt_fidel_unnormalised,
                                             vectorised=False)

    def _fidel_cost(self, z):
        """ Cost of an evaluation at fidelity z: proportional to the fraction
        of data used, plus a small constant (0.01) overhead. """
        return 0.01 + (float(z[0])/self.max_data)

    def _mf_func(self, z, x):
        """ Evaluate the CV score of the estimator configured from search point
        x, trained on a random subsample of int(z[0]) rows of (X, y). """
        # Decode the search-space point into a parameter dict and merge it
        # with the user-supplied fixed parameters (fixed_params wins).
        pgrid = convert_values_to_dict(list(x),self.problem_bounds,self.keys, self.param_dict)
        grid = merge_two_dicts(pgrid,self.fixed_params)
        gbm = self.base_estimator
        gbm.set_params(**grid)
        r,c = self.X.shape
        num_data_curr = int(z[0])
        # NOTE(review): np.random.choice defaults to sampling WITH replacement.
        inds = np.random.choice(r,num_data_curr)
        feat_curr = self.X[inds]
        label_curr = self.y[inds]
        return self.get_kfold_val_score(gbm, feat_curr, label_curr)

    def get_kfold_val_score(self,clf, X, Y, num_folds=None,random_seed = 512):
        """ Mean k-fold cross-validation score of clf on (X, Y), negated when
        greater_is_better is False so the optimiser can always maximise. """
        # Save and restore the global numpy RNG state so seeding here does not
        # perturb the caller's random stream.
        st0 = np.random.get_state()
        if random_seed is None:
            np.random.seed()
        else:
            np.random.seed(random_seed)
        # NOTE(review): the num_folds argument is ignored; self.cv always wins.
        num_folds = self.cv
        acc = cross_val_score(clf,X = X,y = Y,cv=num_folds,n_jobs=self.n_jobs,scoring=self.scoring)
        np.random.set_state(st0)
        if self.greater_is_better:
            return acc.mean()
        else:
            return -acc.mean()
| 6,789 | 34.549738 | 146 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/MFTreeSearchCV/MFTreeSearchCV.py
|
# Author: Rajat Sen
# This is the main source file that implements the methods MFTreeSearchCV
from __future__ import print_function
from __future__ import division
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from sklearn.model_selection import GridSearchCV
from MFTreeSearchCV.converters import *
from MFTreeSearchCV.MFTreeFunction import *
import numpy as np
from mf.mf_func import MFOptFunction
from utils.general_utils import map_to_cube
import sys
from mf.mf_func import get_noisy_mfof_from_mfof
import time
from MFTreeSearchCV.MFHOO import *
import pandas as pd
class MFTreeSearchCV(GridSearchCV):
    """Multi-Fidelity Tree Search over specified parameter ranges for an estimator.
    Important members are fit, predict.
    MFTreeSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba" if they are present in the
    base-estimator. The parameters of the estimator used to apply these methods are
    optimized by cross-validated Tree Search over a parameter search space.
    ----------
    estimator : estimator object.
        This is assumed to implement the scikit-learn estimator interface.
        Unlike grid search CV, estimator need not provide a ``score`` function.
        Therefore ``scoring`` must be passed.
    param_dict : Dictionary with parameter names (string) as keys where each value is
        another dictionary. The value dictionary has the keys 'range' that specifies
        the range of the hyper-parameter, 'type': 'int' or 'cat' or 'real' (integer,
        categorical or real), and 'scale': 'linear' or 'log' specifying whether the
        search is done on a linear scale or a logarithmic scale. An example param_dict
        for scikit-learn SVC is as follows:
        eg: param_dict = {'C' : {'range': [1e-2,1e2], 'type': 'real', 'scale': 'log'}, \
        'kernel' : {'range': [ 'linear', 'poly', 'rbf', 'sigmoid'], 'type': 'cat'}, \
        'degree' : {'range': [3,10], 'type': 'int', 'scale': 'linear'}}
    scoring : string, callable, list/tuple, dict or None, default: None
        A single string (see :ref:`scoring_parameter`). This must be specified as a
        string. See scikit-learn metrics for more details.
    fixed_params: dictionary of parameter values other than the ones in param_dict,
        that should be held fixed at the supplied value. For example, if
        fixed_params = {'nthread': 10} is passed with estimator as XGBoost, it means
        that all XGBoost instances will be run with 10 parallel threads.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
    debug : Binary
        Controls the verbosity: True means more messages, while False only prints
        critical messages.
    refit : True means the best parameters are fit into an estimator and trained,
        while False means the best_estimator is not refit.
    fidelity_range : range of fidelity to use. It is a tuple (a,b) which means the
        lowest fidelity uses a samples for training and validation and b samples are
        used when fidelity is the highest. We recommend setting b to the total number
        of training samples available and a to a reasonable value.
    n_jobs : number of parallel runs for the CV. Note that njobs * (number of threads
        used in the estimator) must be less than the number of threads allowed in
        your machine. Default value is 1.
    nu_max : automatically set, but can be given a default value in the range (0,2]
    rho_max : rho_max in the paper. Default value is 0.95 and is recommended
    sigma : sigma in the paper. Default value is 0.02, adjust according to the
        believed noise standard deviation in the system
    C : default is 1.0, which is overwritten if Auto = True, which is the
        recommended setting
    Auto : If True then the bias function parameter C is auto set. This is recommended.
    tol : default value is 1e-3. All fidelities z_1, z_2 such that
        |z_1 - z_2| < tol are assumed to yield the same bias value
    total_budget : total budget for the search in seconds. This includes the time for
        automatic parameter C selection and does not include refit time.
        total_budget should ideally be more than 5X the unit_cost which is the time
        taken to run one experiment at the highest fidelity
    unit_cost : time in seconds required to fit the base estimator at the highest
        fidelity. This should be estimated by the user and then supplied.
    Attributes
    ----------
    cv_results_ : dictionary showing the scores attained under a few parameter
        settings. Each parameter setting is the best parameter obtained from a
        tree-search call.
    best_estimator_ : estimator or dict
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if ``refit=False``.
        See ``refit`` parameter for more information on allowed values.
    best_score_ : float
        Mean cross-validated score of the best_estimator
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    refit_time_ : float
        Seconds used for refitting the best model on the whole dataset.
        This is present only if ``refit`` is not False.
    fit_time_ : float
        Seconds taken to find the best parameters. Should be close to the
        budget given.
    """
    def __init__(self, estimator, param_dict, fidelity_range,total_budget, scoring='accuracy',\
            greater_is_better = True, fixed_params = {},\
            refit=True, cv = 3, debug = True, n_jobs = 1, \
            nu_max = 1.0, rho_max = 0.95, sigma = 0.02, C = 0.05, \
            tol = 1e-3, \
            Randomize = False, Auto = True, unit_cost = None,mult = 0.2):
        # GridSearchCV requires a param_grid; build a degenerate one holding a
        # single value per parameter purely to satisfy the parent constructor
        # (the actual search is done by the tree search, not by grid search).
        param_grid = {}
        for key in param_dict:
            p = param_dict[key]
            param_grid[key] = [p['range'][0]]
        # NOTE(review): iid='warn' was deprecated and later removed from
        # scikit-learn; confirm the pinned sklearn version still accepts it.
        super(MFTreeSearchCV, self).__init__(
            estimator=estimator, param_grid = param_grid,scoring=scoring,
            n_jobs=n_jobs, iid='warn', refit=refit, cv=cv, verbose=debug)
        self.estimator = estimator
        self.param_dict = param_dict
        self.scoring = scoring
        self.greater_is_better = greater_is_better
        self.fixed_params = fixed_params
        self.n_jobs = n_jobs
        self.fidelity_range = fidelity_range
        self.refit = refit
        self.cv = cv
        self.debug = debug
        self.nu_max = nu_max
        self.rho_max = rho_max
        self.sigma = sigma
        self.C = C
        self.tol = tol
        # NOTE(review): fidelity_range was already stored above; this is a
        # duplicate assignment.
        self.fidelity_range = fidelity_range
        self.total_budget = total_budget
        self.unit_cost = unit_cost
        self.mult = mult
        self.Randomize = Randomize
        self.Auto = Auto

    def _create_mfobject(self,X,y):
        """ Wraps (X, y) and the search settings into an MFTreeFunction. """
        MF = MFTreeFunction(X,y,self.estimator, self.param_dict,\
            self.fidelity_range, \
            self.scoring, self.greater_is_better, self.fixed_params,\
            self.n_jobs, self.cv)
        return MF

    def _populate_cv_results(self,points,evals):
        """ Stores the evaluated (params, score) pairs in cv_results_ as a
        pandas DataFrame with one row per evaluated point. """
        self.cv_results_ = {}
        for i in range(len(points)):
            # Decode each search point back into a parameter dictionary.
            pr = convert_values_to_dict(points[i],self.MF.problem_bounds,\
                self.MF.keys, self.MF.param_dict)
            self.cv_results_[i] = {'params':pr,'score':evals[i]}
        self.cv_results_ = pd.DataFrame(self.cv_results_).transpose()

    def _refit(self,X,y):
        """ Fits the estimator with the best found (plus fixed) parameters on
        the full dataset (X, y). """
        params = merge_two_dicts(self.best_params_,self.fixed_params)
        self.best_estimator_ = self.estimator.set_params(**params)
        self.best_estimator = self.best_estimator_.fit(X,y)

    def fit(self,X,y):
        """ Runs the budgeted multi-fidelity tree search on (X, y), populates
        best_params_/best_score_/cv_results_ (and refits if requested), and
        returns self. """
        self.MF = self._create_mfobject(X,y)
        t1 = time.time()
        # MFPOO runs several MFHOO instances over (nu, rho) settings.
        self.MP = MFPOO(mfobject=self.MF, nu_max=self.nu_max, rho_max=self.rho_max, \
            total_budget=self.total_budget, sigma=self.sigma, C=self.C, \
            mult=self.mult, tol = self.tol, Randomize = self.Randomize, \
            Auto = self.Auto,unit_cost=self.unit_cost,\
            CAPITAL = 'Time', debug = self.debug )
        self.MP.run_all_MFHOO()
        self.points, self.evals = self.MP.get_point()
        t2 = time.time()
        self.fit_time_ = t2 - t1
        # Best point = highest evaluation among the candidates returned.
        index = np.argmax(self.evals)
        bp = self.points[index]
        self.best_params_ = convert_values_to_dict(bp,self.MF.problem_bounds,self.MF.keys, self.MF.param_dict)
        self.best_score_ = self.evals[index]
        self._populate_cv_results(self.points,self.evals)
        # NOTE(review): both branches return self; the else is redundant.
        if self.refit:
            t1 = time.time()
            self._refit(X,y)
            t2 = time.time()
            self.refit_time_ = t2 - t1
            return self
        else:
            return self
| 8,272 | 34.813853 | 148 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/MFTreeSearchCV/__init__.py
|
# Code for MFTreeSearchCV
# Contact: [email protected]
from __future__ import division
from __future__ import print_function
| 129 | 20.666667 | 37 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/MFTreeSearchCV/converters.py
|
# Author: Rajat Sen
### This file is meant for functions that interchange various methods of representing the range and values for various hyper-parameters.
from __future__ import print_function
from __future__ import division
import numpy as np
def convert_dict_to_bounds(param_dict):
    '''
    Flatten param_dict into per-dimension search bounds.
    Returns:
        problem_bounds: list of [low, high] ranges, one per hyper-parameter —
            log-transformed for 'log'-scale parameters, and the unit interval
            [0, 1] for categorical parameters.
        keys: parameter names, in the same order as problem_bounds.
    '''
    problem_bounds = []
    keys = []
    for name in param_dict:
        spec = param_dict[name]
        if spec['type'] == 'cat':
            # Categorical dimensions are searched on the unit interval.
            bound = [0, 1]
        elif spec['scale'] == 'linear':
            bound = spec['range']
        else:
            # Log-scale: search over the logarithms of the endpoints.
            bound = [np.log(spec['range'][0]), np.log(spec['range'][1])]
        problem_bounds.append(bound)
        keys.append(name)
    return problem_bounds, keys
def indexify(v, r):
    '''
    Map a value v in [0, 1) to the index of the length-1/r bucket containing
    it; values falling outside every bucket (e.g. v >= 1) map to r - 1.
    '''
    for bucket in range(r):
        if float(bucket) / r <= v < float(bucket + 1) / r:
            return bucket
    # Fallback for out-of-range values: the last bucket.
    return r - 1
def convert_values_to_dict(values, problem_bounds, keys, param_dict):
    '''
    Convert a vector of search-space values (one per hyper-parameter, ordered
    as in keys) into a parameter dictionary suitable for
    estimator.set_params(**result).
    '''
    vdict = {}
    for k, v in zip(keys, values):
        spec = param_dict[k]
        if spec['type'] == 'cat':
            # v lives on [0, 1]; pick the category whose bucket contains it.
            choices = spec['range']
            vdict[k] = choices[indexify(v, len(choices))]
        else:
            # Undo the log transform applied by convert_dict_to_bounds.
            value = np.exp(v) if spec['scale'] == 'log' else v
            if spec['type'] == 'int':
                value = int(value)
            vdict[k] = value
    return vdict
| 1,689 | 18.204545 | 137 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/gp/gp_instances.py
|
"""
A module which implements different instances of GPs.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
# pylint: disable=super-on-old-class
import numpy as np
# Local imports
import gp_core
import kernel
from utils.option_handler import get_option_specs
# Some basic parameters for simple GPs.
# Option specs consumed by utils.option_handler.load_options: each entry is
# (name, required?, default, help-text).
basic_gp_args = [
  get_option_specs('kernel_type', False, 'se',
    'Specify type of kernel. Should be se or poly'),
  get_option_specs('mean_func_type', False, 'median',
    # NOTE(review): the parenthesised pair below is a 2-tuple, not string
    # concatenation (a '+' appears to be missing); the help text is therefore
    # passed as a tuple. Same applies to noise_var_type below.
    ('Specify the type of mean function. Should be mean, median, const ',
     'or zero. If const, specifcy value in mean-func-const.')),
  get_option_specs('mean_func_const', False, 0.0,
    'The constant value to use if mean_func_type is const.'),
  get_option_specs('noise_var_type', False, 'tune',
    ('Specify how to obtain the noise variance. Should be tune, label ',
     'or value. Specify appropriate value in noise_var_label or',
     'noise_var_value')),
  get_option_specs('noise_var_label', False, 0.05,
    'The fraction of label variance to use as noise variance.'),
  get_option_specs('noise_var_value', False, 0.1,
    'The (absolute) value to use as noise variance.'),
]
# Parameters for the SE kernel.
se_gp_args = [
  get_option_specs('use_same_bandwidth', False, False,
    'If true uses same bandwidth on all dimensions. Default is False.'),
]
# Parameters for the Polynomial kernel.
poly_gp_args = [
  get_option_specs('use_same_scalings', False, False,
    'If true uses same scalings on all dimensions. Default is False.'),
  get_option_specs('poly_order', False, 1,
    'Order of the polynomial to be used. Default is 1 (linear kernel).')
]
# All parameters accepted by SimpleGPFitter, in one flat list.
all_simple_gp_args = gp_core.mandatory_gp_args + basic_gp_args + se_gp_args + poly_gp_args
class SEGP(gp_core.GP):
  """ An implementation of a GP using a SE (squared-exponential) kernel. """

  def __init__(self, X, Y, ke_scale, ke_dim_bandwidths, mean_func, noise_var,
               *args, **kwargs):
    """ Constructor. ke_scale and ke_dim_bandwidths are the kernel hyper-parameters.
        ke_dim_bandwidths can be a vector of length dim or a scalar (in which case we
        will use the same bandwidth for all dimensions).
    """
    # Build the kernel here; everything else is handled by the GP base class.
    se_kernel = kernel.SEKernel(dim=X.shape[1], scale=ke_scale,
                                dim_bandwidths=ke_dim_bandwidths)
    super(SEGP, self).__init__(X, Y, se_kernel, mean_func, noise_var, *args, **kwargs)
class PolyGP(gp_core.GP):
  """ An implementation of a GP using a polynomial kernel. """

  def __init__(self, X, Y, ke_order, ke_dim_scalings, mean_func, noise_var,
               *args, **kwargs):
    """ Constructor. ke_order and ke_dim_scalings are the kernel hyper-parameters.
        See kernel.PolyKernel for more info.
    """
    poly_kernel = kernel.PolyKernel(dim=X.shape[1], order=ke_order,
                                    dim_scalings=ke_dim_scalings)
    super(PolyGP, self).__init__(X, Y, poly_kernel, mean_func, noise_var, *args, **kwargs)
class SimpleGPFitter(gp_core.GPFitter):
  """ A concrete implementation to fit a simple GP. Use this as an example."""
  # pylint: disable=attribute-defined-outside-init

  def __init__(self, X, Y, options=None, *args, **kwargs):
    """ Constructor.
        X, Y: training features and labels.
        options: a Namespace, a list of option specs, or None (in which case
        all_simple_gp_args is used). """
    # Stash the data, then let the base-class constructor drive _child_set_up.
    self.X = X
    self.Y = Y
    if options is None:
      options = all_simple_gp_args
    super(SimpleGPFitter, self).__init__(options, *args, **kwargs)

  def _child_set_up(self):
    """ Sets parameters for GPFitter: validates the options and computes the
        hyper-parameter bounds (self.hp_bounds) to be tuned over. """
    # Check args - so that we don't have to keep doing this all the time.
    # BUG FIX: valid kernel types are 'se' and 'poly' (the original tested for
    # 'kernel', which made kernel_type='poly' raise despite being supported).
    if not self.options.kernel_type in ['se', 'poly']:
      raise ValueError('Unknown kernel_type. Should be either se or poly.')
    if not self.options.noise_var_type in ['tune', 'label', 'value']:
      raise ValueError('Unknown noise_var_type. Should be either tune, label or value.')
    if not self.options.mean_func_type in ['mean', 'median', 'const', 'zero']:
      raise ValueError('Unknown mean_func_type. Should be one of mean/median/const/zero.')
    # Set some parameters we will be using often.
    self.Y_var = self.Y.std()**2
    self.input_dim = self.X.shape[1]
    # Bounds for the hyper-parameters (all tuned in log space).
    self.hp_bounds = []
    # Noise variance: only a tunable hyper-parameter in 'tune' mode.
    if self.options.noise_var_type == 'tune':
      self.noise_var_log_bounds = [np.log(0.005 * self.Y_var), np.log(0.2 * self.Y_var)]
      self.hp_bounds.append(self.noise_var_log_bounds)
    # Kernel parameters.
    if self.options.kernel_type == 'se':
      self._se_kernel_set_up()
    elif self.options.kernel_type == 'poly':
      self._poly_kernel_set_up()

  def _se_kernel_set_up(self):
    """ Set up hyper-parameter bounds for the SE kernel (scale + bandwidths). """
    # Scale: bounded relative to the label variance.
    self.scale_log_bounds = [np.log(0.1 * self.Y_var), np.log(10 * self.Y_var)]
    # Bandwidths: bounded relative to the spread of the training inputs.
    X_std_norm = np.linalg.norm(self.X.std(axis=0))
    single_bandwidth_log_bounds = [np.log(0.01 * X_std_norm), np.log(10 * X_std_norm)]
    if self.options.use_same_bandwidth:
      self.bandwidth_log_bounds = [single_bandwidth_log_bounds]
    else:
      # One bandwidth per input dimension.
      self.bandwidth_log_bounds = [single_bandwidth_log_bounds] * self.input_dim
    self.hp_bounds += [self.scale_log_bounds] + self.bandwidth_log_bounds

  def _poly_kernel_set_up(self):
    """ Set up for the Poly kernel. """
    # TODO: Implement poly kernel set up.
    raise NotImplementedError('Not implemented Poly kernel yet.')

  def _child_build_gp(self, gp_hyperparams):
    """ Builds a GP from a flat hyper-parameter vector (in the order laid out
        by _child_set_up: [noise_var?, scale, bandwidths...]). """
    # Noise variance ####################################
    if self.options.noise_var_type == 'tune':
      noise_var = np.exp(gp_hyperparams[0])
      gp_hyperparams = gp_hyperparams[1:]  # consume the first entry
    elif self.options.noise_var_type == 'label':
      noise_var = self.options.noise_var_label * (self.Y.std()**2)
    else:
      noise_var = self.options.noise_var_value
    # Mean function #####################################
    if hasattr(self.options, 'mean_func') and self.options.mean_func is not None:
      mean_func = self.options.mean_func
    else:
      if self.options.mean_func_type == 'mean':
        mean_func_const_value = self.Y.mean()
      elif self.options.mean_func_type == 'median':
        mean_func_const_value = np.median(self.Y)
      elif self.options.mean_func_type == 'const':
        mean_func_const_value = self.options.mean_func_const
      else:
        mean_func_const_value = 0
      mean_func = lambda x: np.array([mean_func_const_value] * len(x))
    # Build kernels and return ##########################
    if self.options.kernel_type == 'se':
      return self._build_se_gp(noise_var, mean_func, gp_hyperparams)
    elif self.options.kernel_type == 'poly':
      return self._build_poly_gp(noise_var, mean_func, gp_hyperparams)

  def _build_se_gp(self, noise_var, mean_func, gp_hyperparams):
    """ Builds the GP if using an SE kernel. """
    # Kernel parameters (stored in log space; exponentiate to recover).
    ke_scale = np.exp(gp_hyperparams[0])
    ke_dim_bandwidths = (
      [np.exp(gp_hyperparams[1])] * self.input_dim if self.options.use_same_bandwidth
      else np.exp(gp_hyperparams[1:]))
    # Return a squared exponential GP.
    return SEGP(self.X, self.Y, ke_scale, ke_dim_bandwidths, mean_func, noise_var,
                build_posterior=True, reporter=self.reporter)

  def _build_poly_gp(self, noise_var, mean_func, gp_hyperparams):
    """ Builds the GP if using a Poly kernel. """
    # TODO: Implement poly kernel build.
    raise NotImplementedError('Not implemented Poly kernel yet.')
| 7,818 | 42.198895 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/gp/unittest_gp_core.py
|
"""
Unit tests for gp.py
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
from utils.base_test_class import BaseTestClass, execute_tests
import numpy as np
# Local
import gp_core
import kernel
def gen_gp_test_data():
  """ Generates four synthetic regression datasets of increasing size and
      dimension. Each entry of the returned list is a 5-tuple
      (X_train, Y_train, kernel, X_test, Y_test). """
  # pylint: disable=too-many-locals
  # Dataset 1: 1-D quadratic on equispaced bucket mid-points.
  f1 = lambda x: (x**2).sum(axis=1)
  N1 = 5
  X1_tr = np.array(range(N1)).astype(float).reshape((N1, 1))/N1 + 1/(2*N1)
  Y1_tr = f1(X1_tr)
  X1_te = np.random.random((50, 1))
  Y1_te = f1(X1_te)
  kernel1 = kernel.SEKernel(1, 1, 0.5)
  # Dataset 2: 10-D weighted quadratic on uniform random points.
  N2 = 100
  D2 = 10
  f2 = lambda x: ((x**2) * range(1, D2+1)/D2).sum(axis=1)
  X2_tr = np.random.random((N2, D2))
  Y2_tr = f2(X2_tr)
  X2_te = np.random.random((N2, D2))
  Y2_te = f2(X2_te)
  kernel2 = kernel.SEKernel(D2, 10, 0.2*np.sqrt(D2))
  # Dataset 3: 6-D weighted cubic polynomial.
  N3 = 200
  D3 = 6
  f3 = lambda x: ((x**3 + 2 * x**2 - x + 2) * range(1, D3+1)/D3).sum(axis=1)
  X3_tr = np.random.random((N3, D3))
  Y3_tr = f3(X3_tr)
  X3_te = np.random.random((N3, D3))
  Y3_te = f3(X3_te)
  kernel3 = kernel.SEKernel(D3, 10, 0.2*np.sqrt(D3))
  # Dataset 4: 8-D trigonometric mixture.
  N4 = 400
  D4 = 8
  f4 = lambda x: ((np.sin(x**2) + 2 * np.cos(x**2) - x + 2) *
                  range(1, D4+1)/D4).sum(axis=1)
  X4_tr = np.random.random((N4, D4))
  Y4_tr = f4(X4_tr)
  X4_te = np.random.random((N4, D4))
  Y4_te = f4(X4_te)
  kernel4 = kernel.SEKernel(D4, 10, 0.2*np.sqrt(D4))
  # Put all datasets into a list.
  return [(X1_tr, Y1_tr, kernel1, X1_te, Y1_te),
          (X2_tr, Y2_tr, kernel2, X2_te, Y2_te),
          (X3_tr, Y3_tr, kernel3, X3_te, Y3_te),
          (X4_tr, Y4_tr, kernel4, X4_te, Y4_te)]
def build_gp_with_dataset(dataset):
  """ Internal function to build a GP with the dataset: a constant-median
      mean function and a noise variance of 1/20 of the label variance. """
  mean_func = lambda x: np.array([np.median(dataset[1])] * len(x))
  noise_var = dataset[1].std()**2/20
  return gp_core.GP(dataset[0], dataset[1], dataset[2], mean_func, noise_var)
def compute_average_prediction_error(dataset, preds):
  """ Mean squared prediction error of preds against the test labels stored
      at dataset[4]. """
  residuals = dataset[4] - preds
  return np.linalg.norm(residuals)**2 / len(dataset[4])
class GPTestCase(BaseTestClass):
  """ Unit tests for the GP class. """
  # pylint: disable=too-many-locals

  def setUp(self):
    """ Set up for tests: generate the four synthetic datasets once. """
    self.datasets = gen_gp_test_data()

  def test_add_data(self):
    """ Tests GP.add_data: the training-set size must grow by the number of
        points added. """
    self.report('GP.add_data')
    for dataset in self.datasets:
      num_new = np.random.randint(3, 10)
      X_new = np.random.random((num_new, dataset[0].shape[1]))
      Y_new = np.random.random(num_new)
      curr_gp = build_gp_with_dataset(dataset)
      curr_gp.add_data(X_new, Y_new)
      assert num_new + len(dataset[1]) == curr_gp.num_tr_data

  def test_eval(self):
    """ Tests the evaluation. The GP's test error should beat a constant
        (mean-label) predictor on most datasets; randomness means occasional
        failures are expected. """
    self.report('GP.eval: Probabilistic test, might fail sometimes')
    num_successes = 0
    for dataset in self.datasets:
      curr_gp = build_gp_with_dataset(dataset)
      curr_pred, _ = curr_gp.eval(dataset[3])
      curr_err = compute_average_prediction_error(dataset, curr_pred)
      # Baseline: predict the constant training-label mean everywhere.
      const_err = compute_average_prediction_error(dataset, dataset[1].mean())
      success = curr_err < const_err
      self.report(('(N,D)=' + str(dataset[0].shape) + ':: GP-err= ' + str(curr_err) +
                   ', Const-err= ' + str(const_err) + ', success=' + str(success)),
                  'test_result')
      num_successes += int(success)
    # Require the GP to win on more than 60% of the datasets.
    assert num_successes > 0.6 *len(self.datasets)

  def test_compute_log_marginal_likelihood(self):
    """ Tests compute_log_marginal_likelihood. Does not test for accurate
        implementation. Only tests if the function runs without runtime errors. """
    self.report('GP.compute_log_marginal_likelihood: ** Runtime test errors only **')
    for dataset in self.datasets:
      curr_gp = build_gp_with_dataset(dataset)
      lml = curr_gp.compute_log_marginal_likelihood()
      self.report('(N,D)=' + str(dataset[0].shape) + ' lml = ' + str(lml),
                  'test_result')
if __name__ == '__main__':
execute_tests()
| 4,143 | 32.691057 | 89 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/gp/demo_gp_1.py
|
"""
A simple demo for gps.
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=superfluous-parens
from argparse import Namespace
import numpy as np
# Local
import gp_core
import gp_instances
import kernel
def get_data():
  """ Generates a small 1-D demo dataset: N training points at bucket
      mid-points in [0, 1], labels from a degree-5 polynomial, plus an SE
      kernel to model them. Returns a Namespace(func, X_tr, Y_tr, kern). """
  func = lambda t: (-70 * (t-0) * (t-0.35) * (t+0.55) * (t-0.65) * (t-0.97)).sum(axis=1)
  N = 5
  # Equispaced mid-points: i/N + 1/(2N) for i in 0..N-1.
  X_tr = np.array(range(N)).astype(float).reshape((N, 1))/N + 1/(float(2*N))
  Y_tr = func(X_tr)
  kern = kernel.SEKernel(1, 1, 0.5)
  data = Namespace(func=func, X_tr=X_tr, Y_tr=Y_tr, kern=kern)
  return data
def _demo_common(gp, data, desc):
  """ Common processes for the demo: print the log marginal likelihood and
      kernel hyper-parameters, then visualise the GP against the true function
      on [0, 1]. """
  lml = gp.compute_log_marginal_likelihood()
  print(desc + ': Log-Marg-Like: ' + str(lml) + ', kernel: ' + str(gp.kernel.hyperparams))
  gp.visualise(true_func=data.func, boundary=[0, 1])
def demo_gp_given_hps(data, kern, desc):
  """ A demo given the kernel hyper-parameters: builds a GP with a constant
      mean function and a fixed noise level, then runs _demo_common. """
  mean_func = lambda x: np.array([data.Y_tr.mean()] * len(x))
  # NOTE(review): this is std/10, not variance/10, despite being passed as
  # noise_var — presumably an intentional demo choice; confirm.
  noise_var = data.Y_tr.std()/10
  est_gp = gp_core.GP(data.X_tr, data.Y_tr, kern, mean_func, noise_var)
  _demo_common(est_gp, data, desc)
def demo_gp_fit_hps(data, desc):
  """ A demo where the kernel hyper-parameters are fitted by maximising the
      marginal likelihood (via SimpleGPFitter). """
  fitted_gp, _ = (gp_instances.SimpleGPFitter(data.X_tr, data.Y_tr)).fit_gp()
  _demo_common(fitted_gp, data, desc)
def main():
  """ Main function: runs the fixed-hyper-parameter demo, then the fitted
      one. Each opens a plot window; close it to continue. """
  data = get_data()
  print('First fitting a GP with the given kernel. Close window to continue.')
  demo_gp_given_hps(data, data.kern, 'Given Kernel')
  print('\nNow estimating kernel via marginal likelihood. Close window to continue.')
  demo_gp_fit_hps(data, 'Fitted Kernel')
if __name__ == '__main__':
main()
| 1,784 | 29.254237 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/gp/gp_core.py
|
"""
A module for fitting a GP and tuning its kernel.
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
import sys
import numpy as np
from utils.general_utils import stable_cholesky, draw_gaussian_samples
from utils.optimisers import direct_ft_maximise
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter
# These are mandatory requirements. Every GP implementation should probably use them.
# These are mandatory requirements. Every GP implementation should probably use
# them. Each entry is an option spec: (name, required?, default, help-text).
mandatory_gp_args = [
  get_option_specs('hp_tune_criterion', False, 'ml',
    'Which criterion to use when tuning hyper-parameters.'),
  get_option_specs('hp_tune_opt', False, 'direct',
    'Which optimiser to use when maximising the tuning criterion.'),
  get_option_specs('hp_tune_max_evals', False, -1,
    'How many evaluations to use when maximising the tuning criterion.'),
]
def _check_feature_label_lengths_and_format(X, Y):
""" Checks if the length of X and Y are the same. """
if X.shape[0] != len(Y):
raise ValueError('Size of X (' + str(X.shape) + ') and Y (' +
str(Y.shape) + ') do not match.')
if len(X.shape) != 2 or len(Y.shape) != 1:
raise ValueError('X should be an nxd matrix and Y should be an n-vector.' +
'Given shapes of X, Y are: ', str(X.shape) + ', ' + str(Y.shape))
class GP(object):
  '''
  Base class for Gaussian processes: holds training data, a kernel, a mean
  function and a noise variance; maintains a Cholesky-factorised posterior.
  '''
  def __init__(self, X, Y, kernel, mean_func, noise_var, build_posterior=True,
               reporter=None):
    """ Constructor.
        X, Y: n x d features and n-vector labels.
        kernel: callable kernel object, kernel(A, B) -> covariance matrix.
        mean_func: callable returning prior mean values for a set of points.
        noise_var: observation noise variance added to the kernel diagonal.
        build_posterior: if True, factorise the posterior immediately.
        reporter: optional object with a write() method for messages. """
    super(GP, self).__init__()
    _check_feature_label_lengths_and_format(X, Y)
    self.X = X
    self.Y = Y
    self.kernel = kernel
    self.mean_func = mean_func
    self.noise_var = noise_var
    self.reporter = reporter
    # Some derived attributes.
    self.num_tr_data = len(self.Y)
    self.input_dim = self.X.shape[1]
    # Initialise other attributes we will need:
    # L is the Cholesky factor of the (noisy) prior covariance; alpha solves
    # K^{-1}(Y - mean). Both are set by build_posterior().
    self.L = None
    self.alpha = None
    # Build posterior if necessary
    if build_posterior:
      self.build_posterior()

  def _write_message(self, msg):
    """ Writes a message via the reporter or the std out. """
    if self.reporter:
      self.reporter.write(msg)
    else:
      sys.stdout.write(msg)

  def add_data(self, X_new, Y_new, rebuild=True):
    """ Adds new data to the GP. If rebuild is true it rebuilds the posterior. """
    _check_feature_label_lengths_and_format(X_new, Y_new)
    self.X = np.vstack((self.X, X_new))
    self.Y = np.append(self.Y, Y_new)
    self.num_tr_data = len(self.Y)
    if rebuild:
      self.build_posterior()

  def build_posterior(self):
    """ Builds the posterior GP by computing the Cholesky factor L of the
        noisy prior covariance and the weight vector alpha = K^{-1}(Y - mean). """
    prior_covar = self.kernel(self.X, self.X) + self.noise_var * np.eye(self.num_tr_data)
    Y_centred = self.Y - self.mean_func(self.X)
    self.L = stable_cholesky(prior_covar)
    # Two triangular solves instead of a direct inverse, for stability.
    self.alpha = np.linalg.solve(self.L.T, np.linalg.solve(self.L, Y_centred))

  def eval(self, X_test, uncert_form='none'):
    """ Evaluates the GP on X_test. If uncert_form is
          covar: returns the entire covariance on X_test (nxn matrix)
          std:   returns the standard deviations on the test set (n vector)
          none:  returns nothing (default).
        Returns a (pred_mean, uncert) tuple. """
    # First check for uncert_form
    if not uncert_form in ['none', 'covar', 'std']:
      raise ValueError('uncert_form should be one of none, std or covar.')
    # Compute the posterior mean: mean(X*) + K(X*, X) alpha.
    test_mean = self.mean_func(X_test)
    K_tetr = self.kernel(X_test, self.X)
    pred_mean = test_mean + K_tetr.dot(self.alpha)
    # Compute the posterior variance or standard deviation as required.
    if uncert_form == 'none':
      uncert = None
    else:
      K_tete = self.kernel(X_test, X_test)
      V = np.linalg.solve(self.L, K_tetr.T)
      post_covar = K_tete - V.T.dot(V)
      if uncert_form == 'covar':
        uncert = post_covar
      elif uncert_form == 'std':
        uncert = np.sqrt(np.diag(post_covar))
      else:
        # NOTE(review): unreachable — uncert_form was validated above.
        raise ValueError('uncert_form should be none, covar or std.')
    return (pred_mean, uncert)

  def compute_log_marginal_likelihood(self):
    """ Computes the log marginal likelihood of the training data under the
        current kernel/mean/noise settings. """
    Y_centred = self.Y - self.mean_func(self.X)
    # Standard GP LML: -1/2 y^T K^{-1} y - log|L| - (n/2) log(2 pi).
    ret = -0.5 * Y_centred.T.dot(self.alpha) - (np.log(np.diag(self.L))).sum() \
          - 0.5 * self.num_tr_data * np.log(2*np.pi)
    return ret

  def draw_samples(self, num_samples, X_test=None, mean_vals=None, covar=None):
    """ Draws num_samples samples and returns their values at X_test. If
        X_test is given, mean_vals/covar are computed from the posterior;
        otherwise the supplied mean_vals and covar are used. """
    if X_test is not None:
      mean_vals, covar = self.eval(X_test, 'covar')
    return draw_gaussian_samples(num_samples, mean_vals, covar)

  def visualise(self, file_name=None, boundary=None, true_func=None,
                num_samples=20, conf_width=3):
    """ Visualises the GP (1-D inputs only): confidence band, posterior
        samples, posterior mean, training data and optionally the true
        function. Saves to file_name if given, else shows the plot. """
    # pylint: disable=unused-variable
    # pylint: disable=too-many-locals
    if self.input_dim != 1:
      self._write_message('Cannot visualise in greater than 1 dimension.\n')
    else:
      import matplotlib.pyplot as plt
      fig = plt.figure()
      N = 400
      leg_handles = []
      leg_labels = []
      if not boundary:
        boundary = [self.X.min(), self.X.max()]
      grid = np.linspace(boundary[0], boundary[1], N).reshape((N, 1))
      (pred_vals, pred_stds) = self.eval(grid, 'std')
      # Shade a high confidence region (mean +/- conf_width * std).
      conf_band_up = pred_vals + conf_width * pred_stds
      conf_band_down = pred_vals - conf_width * pred_stds
      leg_conf = plt.fill_between(grid.ravel(), conf_band_up, conf_band_down,
                                  color=[0.9, 0.9, 0.9])
      # Plot the samples
      gp_samples = self.draw_samples(num_samples, grid)
      plt.plot(grid, gp_samples.T, '--', linewidth=0.5)
      # Plot the true function if available.
      if true_func:
        leg_true = plt.plot(grid, true_func(grid), 'b--', linewidth=3,
                            label='true function')
        leg_handles.append(leg_true)
      # Plot the posterior mean
      leg_post_mean = plt.plot(grid, pred_vals, 'k-', linewidth=4,
                               label='post mean')
      # Finally plot the training data.
      leg_data = plt.plot(self.X, self.Y, 'kx', mew=4, markersize=10,
                          label='data')
      # TODO: create a legend.
      # Finally either plot or show the figure
      if file_name is None:
        plt.show()
      else:
        fig.savefig(file_name)
class GPFitter(object):
  """
  Class for fitting Gaussian processes by maximising a tuning criterion
  (currently the log marginal likelihood) over the GP hyper-parameters.
  Child classes must implement _child_set_up (which defines self.hp_bounds)
  and _child_build_gp (which constructs a GP from a hyper-parameter vector).
  """
  # pylint: disable=attribute-defined-outside-init
  # pylint: disable=abstract-class-not-used
  def __init__(self, options, reporter='default'):
    """ Constructor. options is either a parsed options namespace, or a list of
        option specifications in which case it is read via load_options. """
    super(GPFitter, self).__init__()
    self.reporter = get_reporter(reporter)
    if isinstance(options, list):
      options = load_options(options, 'GP', reporter=self.reporter)
    self.options = options
    self._set_up()
  def _set_up(self):
    """ Sets up a bunch of ancillary parameters. """
    # The following hyper-parameters need to be set mandatorily in _child_set_up.
    self.hp_bounds = None # The bounds for each hyper parameter should be a num_hps x 2
                          # array where the 1st/2nd columns are the lower/upper bounds.
    # Set up hyper-parameters for the child.
    self._child_set_up()
    self.hp_bounds = np.array(self.hp_bounds)
    # Some post child set up
    self.num_hps = len(self.hp_bounds) # The number of hyper parameters
    # The optimiser for the hyper parameters
    if self.options.hp_tune_opt == 'direct':
      self._direct_set_up()
    else:
      raise ValueError('hp_tune_opt should be direct.')
  def _child_set_up(self):
    """ Here you should set up parameters for the child, such as the bounds for the
        optimiser etc. """
    raise NotImplementedError('Implement _child_set_up in a child method.')
  def _direct_set_up(self):
    """ Sets up the DiRect optimiser for the hyper-parameters. """
    # define the following internal function to abstract things out more.
    def _direct_wrap(*args):
      """ A wrapper so as to only return the optimal point. """
      _, opt_pt, _ = direct_ft_maximise(*args)
      return opt_pt
    # Set some parameters
    lower_bounds = self.hp_bounds[:, 0]
    upper_bounds = self.hp_bounds[:, 1]
    if (hasattr(self.options, 'hp_tune_max_evals') and
        self.options.hp_tune_max_evals is not None and
        self.options.hp_tune_max_evals > 0):
      hp_tune_max_evals = self.options.hp_tune_max_evals
    else:
      # Default budget: scale with the number of hyper-parameters,
      # clipped to the range [300, 1e5].
      hp_tune_max_evals = min(1e5, max(300, self.num_hps * 30))
    # Set hp_optimise
    self.hp_optimise = lambda obj: _direct_wrap(obj,
                         lower_bounds, upper_bounds, hp_tune_max_evals)
  def _build_gp(self, gp_hyperparams):
    """ A method which builds a GP from the given gp_hyperparameters. It calls
        _child_build_gp after running some checks. """
    # Check the length of the hyper-parameters
    if self.num_hps != len(gp_hyperparams):
      raise ValueError('gp_hyperparams should be of length %d. Given length: %d.'%(
        self.num_hps, len(gp_hyperparams)))
    return self._child_build_gp(gp_hyperparams)
  def _child_build_gp(self, gp_hyperparams):
    """ A method which builds the child GP from the given gp_hyperparameters. Should be
        implemented in a child method. """
    # Bug fix: the message used to name _build_gp (the implemented wrapper
    # above); the method a child must actually implement is _child_build_gp.
    raise NotImplementedError('Implement _child_build_gp in a child method.')
  def _tuning_objective(self, gp_hyperparams):
    """ This function computes the tuning objective (such as the marginal likelihood)
        which is to be maximised in fit_gp. """
    built_gp = self._build_gp(gp_hyperparams)
    if self.options.hp_tune_criterion in ['ml', 'marginal_likelihood']:
      ret = built_gp.compute_log_marginal_likelihood()
    elif self.options.hp_tune_criterion in ['cv', 'cross_validation']:
      raise NotImplementedError('Yet to implement cross validation based hp-tuning.')
    else:
      raise ValueError('hp_tune_criterion should be either ml or cv')
    return ret
  def fit_gp(self):
    """ Fits a GP according to the tuning criterion. Returns the best GP along with the
        hyper-parameters. """
    opt_hps = self.hp_optimise(self._tuning_objective)
    opt_gp = self._build_gp(opt_hps)
    return opt_gp, opt_hps
| 10,315 | 38.075758 | 89 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/gp/__init__.py
|
"""
A Harness for creating and using various kinds of GPs.
-- [email protected]
"""
| 92 | 14.5 | 56 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/gp/unittest_gp_instances.py
|
"""
Unit tests for gp_instances.py
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# Local
from gp_instances import SimpleGPFitter
from utils.base_test_class import BaseTestClass, execute_tests
from unittest_gp_core import build_gp_with_dataset, compute_average_prediction_error
from unittest_gp_core import gen_gp_test_data
def fit_gp_with_dataset(dataset):
  """ Fits a GP to the (X, Y) pair at the front of dataset and returns it. """
  fitter = SimpleGPFitter(dataset[0], dataset[1], reporter=None)
  gp_obj, _ = fitter.fit_gp()
  return gp_obj
class SimpleGPFitterTestCase(BaseTestClass):
  """ Unit tests for the GP class. """
  # pylint: disable=too-many-locals
  def setUp(self):
    """ Generates the collection of test datasets once per test. """
    self.datasets = gen_gp_test_data()
  def test_marg_likelihood(self):
    """ Checks that the fitted GP attains at least the marginal likelihood of a
        naively constructed GP -- it should, since fitting maximises it. """
    self.report('Marginal likelihood. Probabilistic test, might fail.')
    num_wins = 0
    for curr_data in self.datasets:
      lml_naive = build_gp_with_dataset(curr_data).compute_log_marginal_likelihood()
      lml_fitted = fit_gp_with_dataset(curr_data).compute_log_marginal_likelihood()
      curr_win = lml_naive <= lml_fitted
      self.report('(N,D)=%s:: naive-lml=%0.4f, fitted-lml=%0.4f, succ=%d'%(
          str(curr_data[0].shape), lml_naive, lml_fitted, curr_win), 'test_result')
      num_wins += curr_win
    # Maximisation should win on every dataset.
    assert num_wins == len(self.datasets)
  def test_prediction(self):
    """ Checks that the fitted GP predicts a held-out test set at least as well
        as a naively constructed GP on most datasets. """
    self.report('Prediction. Probabilistic test, might fail.')
    num_wins = 0
    for curr_data in self.datasets:
      preds_naive, _ = build_gp_with_dataset(curr_data).eval(curr_data[3])
      err_naive = compute_average_prediction_error(curr_data, preds_naive)
      preds_fitted, _ = fit_gp_with_dataset(curr_data).eval(curr_data[3])
      err_fitted = compute_average_prediction_error(curr_data, preds_fitted)
      curr_win = err_fitted <= err_naive
      self.report('(N,D)=%s:: naive-err=%0.4f, fitted-err=%0.4f, succ=%d'%(
          str(curr_data[0].shape), err_naive, err_fitted, curr_win), 'test_result')
      num_wins += curr_win
    # Probabilistic criterion: a strict majority (60%) of wins suffices.
    assert num_wins > 0.6 * len(self.datasets)
if __name__ == '__main__':
  # NOTE(review): 5424 is presumably an RNG seed for reproducibility of these
  # probabilistic tests -- confirm against execute_tests' signature.
  execute_tests(5424)
| 2,541 | 35.314286 | 89 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/gp/unittest_kernel.py
|
"""
Unit tests for kernel.py
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
# pylint: disable=abstract-class-little-used
import numpy as np
# Local imports
import kernel
from utils.general_utils import dicts_are_equal
from utils.base_test_class import BaseTestClass, execute_tests
class KernelBasicTestCase(BaseTestClass):
  """ Basic tests for Kernel function """
  # pylint: disable=too-many-instance-attributes
  def setUp(self):
    """ Sets up attributes. """
    # All true_* fixtures below are hand-derived expected gram matrices for
    # the small 2x2 datasets data_1/data_2; do not alter the constants.
    # Data for generic tests
    self.num_data_1 = 2
    self.num_data_2 = 3
    self.dim = 3
    self.hyper_param_dict_1 = {"param_1":1, "param_2":2, "param_3":3}
    self.hyper_param_dict_2 = {"param_4":4, "param_5":5, "param_6":6}
    self.hyper_param_dict_3 = {"param_1":1, "param_2":2, "param_3":3, "param_4":4,
                               "param_5":5, "param_6":6}
    # Data for the SE and poly kernels
    self.se_scale = 2
    self.data_1 = np.array([[1, 2], [3, 4.5]])
    self.data_2 = np.array([[1, 2], [3, 4]])
    # Data for the squared exponential kernel.
    self.se_dim_bandwidths = [0.1, 1]
    self.true_se_vals_11 = self.se_scale * np.array([[1, np.exp(-406.25/2)],
                                                     [np.exp(-406.25/2), 1]])
    self.true_se_vals_22 = self.se_scale * np.array([[1, np.exp(-404/2)],
                                                     [np.exp(-404/2), 1]])
    self.true_se_vals_12 = self.se_scale * np.array([[1, np.exp(-404/2)],
                                            [np.exp(-406.25/2), np.exp(-0.25/2)]])
    # Data for the polynomial kernel
    self.poly_order = 3
    self.poly_dim_scalings = [0.5, 2]
    self.poly_scale = 2
    self.true_poly_vals_11 = self.poly_scale * np.array([[17.25, 37.75],
                                                  [37.75, 84.25]])**self.poly_order
    self.true_poly_vals_22 = self.poly_scale * np.array([[17.25, 33.75],
                                                  [33.75, 67.25]])**self.poly_order
    self.true_poly_vals_12 = self.poly_scale * np.array([[17.25, 33.75],
                                                  [37.75, 75.25]])**self.poly_order
    # Data for the unscaled polynomial kernel.
    self.unscaled_dim_scalings = [0.8, 0.5, 2]
    self.true_upoly_vals_11 = np.array([[16.89, 37.39], [37.39, 83.89]])**self.poly_order
    self.true_upoly_vals_22 = np.array([[16.89, 33.39], [33.39, 66.89]])**self.poly_order
    self.true_upoly_vals_12 = np.array([[16.89, 33.39], [37.39, 74.89]])**self.poly_order
    # Data for the combined kernel
    self.com_scale = 4.3
    self.true_comb_11 = self.com_scale * self.true_se_vals_11 * self.true_upoly_vals_11
    self.true_comb_22 = self.com_scale * self.true_se_vals_22 * self.true_upoly_vals_22
    self.true_comb_12 = self.com_scale * self.true_se_vals_12 * self.true_upoly_vals_12
  def test_basics(self):
    """ Tests basic functionality. """
    # Exercises set_hyperparams (replace) vs add_hyperparams (merge).
    self.report('Testing basic functionality.')
    kern_1 = kernel.Kernel()
    kern_1.set_hyperparams(param_1=1, param_2=2, param_3=3)
    assert dicts_are_equal(kern_1.hyperparams, self.hyper_param_dict_1)
    kern_1.add_hyperparams(**self.hyper_param_dict_2)
    assert dicts_are_equal(kern_1.hyperparams, self.hyper_param_dict_3)
    kern_1.set_hyperparams(**self.hyper_param_dict_2)
    assert dicts_are_equal(kern_1.hyperparams, self.hyper_param_dict_2)
  def test_se_kernel(self):
    """ Tests for the SE kernel. """
    self.report('Tests for the SE kernel.')
    kern = kernel.SEKernel(self.data_1.shape[1], self.se_scale, self.se_dim_bandwidths)
    K11 = kern(self.data_1)
    K22 = kern(self.data_2)
    K12 = kern(self.data_1, self.data_2)
    assert np.linalg.norm(self.true_se_vals_11 - K11) < 1e-10
    assert np.linalg.norm(self.true_se_vals_22 - K22) < 1e-10
    assert np.linalg.norm(self.true_se_vals_12 - K12) < 1e-10
  def test_poly_kernel(self):
    """ Tests for the polynomial kernel. """
    self.report('Tests for the Polynomial kernel.')
    kern = kernel.PolyKernel(self.data_1.shape[1], self.poly_order, self.poly_scale,
                             self.poly_dim_scalings)
    K11 = kern(self.data_1)
    K22 = kern(self.data_2)
    K12 = kern(self.data_1, self.data_2)
    assert np.linalg.norm(self.true_poly_vals_11 - K11) < 1e-10
    assert np.linalg.norm(self.true_poly_vals_22 - K22) < 1e-10
    assert np.linalg.norm(self.true_poly_vals_12 - K12) < 1e-10
  def test_unscaled_poly_kernel(self):
    """ Tests for the polynomial kernel. """
    self.report('Tests for the Unscaled polynomial kernel.')
    kern = kernel.UnscaledPolyKernel(self.data_1.shape[1], self.poly_order,
                                     self.unscaled_dim_scalings)
    K11 = kern(self.data_1)
    K22 = kern(self.data_2)
    K12 = kern(self.data_1, self.data_2)
    assert np.linalg.norm(self.true_upoly_vals_11 - K11) < 1e-10
    assert np.linalg.norm(self.true_upoly_vals_22 - K22) < 1e-10
    assert np.linalg.norm(self.true_upoly_vals_12 - K12) < 1e-10
  def test_comb_kernel(self):
    """ Tests for the combined kernel. """
    self.report('Tests for the Combined kernel.')
    kern_se = kernel.SEKernel(self.data_1.shape[1], self.se_scale, self.se_dim_bandwidths)
    kern_upo = kernel.UnscaledPolyKernel(self.data_1.shape[1], self.poly_order,
                                         self.unscaled_dim_scalings)
    # NOTE(review): shape[0] is passed as the dim argument here (shape[1] is
    # used everywhere else); both equal 2 for data_1 so the test still works,
    # but this looks like a typo -- confirm.
    kern = kernel.CoordinateProductKernel(self.data_1.shape[0], self.com_scale,
                                          [kern_se, kern_upo], [[0, 1], [0, 1]])
    K11 = kern(self.data_1)
    K22 = kern(self.data_2)
    K12 = kern(self.data_1, self.data_2)
    assert np.linalg.norm(self.true_comb_11 - K11) < 1e-10
    assert np.linalg.norm(self.true_comb_22 - K22) < 1e-10
    assert np.linalg.norm(self.true_comb_12 - K12) < 1e-10
  def test_effective_length_se(self):
    """ Tests for the effective length in the SE kernel. """
    # pylint: disable=too-many-locals
    self.report('Tests for the effective length in the SE kernel.')
    # Each tuple: (data, bandwidths, dim, expected L2 norms, expected L1 norms).
    data_1 = np.array([1, 2])
    data_2 = np.array([[0, 1, 2], [1, 1, 0.5]])
    bws_1 = [0.1, 1]
    bws_2 = [0.5, 1, 2]
    res_l2_1 = np.sqrt(104)
    res_l2_2 = np.array([np.sqrt(2), np.sqrt(5.0625)])
    res_l1_1 = 12
    res_l1_2 = np.array([2, 3.25])
    dim_1 = 2
    dim_2 = 3
    all_data = [(data_1, bws_1, dim_1, res_l2_1, res_l1_1),
                (data_2, bws_2, dim_2, res_l2_2, res_l1_2)]
    for data in all_data:
      kern = kernel.SEKernel(data[2], 1, data[1])
      # is_single distinguishes a single point (1-D array) from a batch.
      eff_l1_norms = kern.get_effective_norm(data[0], order=1,
                                             is_single=len(data[0].shape) == 1)
      eff_l2_norms = kern.get_effective_norm(data[0], order=2,
                                             is_single=len(data[0].shape) == 1)
      assert np.linalg.norm(eff_l2_norms - data[3]) < 1e-5
      assert np.linalg.norm(eff_l1_norms - data[4]) < 1e-5
  @classmethod
  def _compute_post_covar(cls, kern, X_tr, X_te):
    """ Computes the posterior covariance. """
    # Direct (non-cholesky) evaluation of the GP posterior covariance.
    K_tr = kern.evaluate(X_tr, X_tr)
    K_tetr = kern.evaluate(X_te, X_tr)
    K_te = kern.evaluate(X_te, X_te)
    post_covar = K_te - K_tetr.dot(np.linalg.solve(K_tr, K_tetr.T))
    post_std = np.sqrt(np.diag(post_covar))
    return post_covar, post_std
  def test_compute_std_slack_se(self):
    """ Tests for the effective length in the SE kernel. """
    self.report('Tests for std slack in the SE kernel.')
    # The data here are in the order [dim, scale, num_data]
    prob_params = [[2, 1, 10], [3, 2, 0], [10, 6, 13]]
    n = 5
    for prob in prob_params:
      dim_bws = list(np.random.random(prob[0]) * 0.3 + 0.5)
      kern = kernel.SEKernel(prob[0], prob[1], dim_bws)
      X_1 = np.random.random((n, prob[0]))
      X_2 = np.random.random((n, prob[0]))
      X_tr = np.random.random((prob[2], prob[0]))
      _, std_1 = self._compute_post_covar(kern, X_tr, X_1)
      _, std_2 = self._compute_post_covar(kern, X_tr, X_2)
      std_diff = np.abs(std_1 - std_2)
      std_slack = kern.compute_std_slack(X_1, X_2)
      diff_12_scaled = kern.get_effective_norm(X_1 - X_2, order=2, is_single=False)
      kern_diff_12_scaled = kern.hyperparams['scale'] * diff_12_scaled
      # The slack must bound the actual std difference, and itself be bounded
      # by the scaled distance between the two point sets.
      assert np.all(std_diff <= std_slack)
      assert np.all(std_slack <= kern_diff_12_scaled)
if __name__ == '__main__':
  # Runs all test cases in this module via the project's test harness.
  execute_tests()
| 8,448 | 43.941489 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/gp/kernel.py
|
"""
Implements various kernels.
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
import numpy as np
# Local imports
from utils.general_utils import dist_squared
class Kernel(object):
  """ Abstract base class for kernels. Child classes must implement
      _child_evaluate; the norm/slack/smoothness utilities are optional. """
  def __init__(self):
    """ Constructor. Starts with an empty hyper-parameter dictionary. """
    super(Kernel, self).__init__()
    self.hyperparams = {}
  def __call__(self, X1, X2=None):
    """ Evaluates the kernel by calling evaluate. """
    return self.evaluate(X1, X2)
  def evaluate(self, X1, X2=None):
    """ Evaluates kernel values between X1 and X2 and returns an n1xn2 kernel
        matrix; if X2 is None, the gram matrix of X1 is returned.
        This is a wrapper for _child_evaluate.
    """
    X2 = X1 if X2 is None else X2
    return self._child_evaluate(X1, X2)
  def _child_evaluate(self, X1, X2):
    """ Evaluates kernel values between X1 and X2 and returns an n1xn2 kernel matrix.
        This is to be implemented in a child kernel.
    """
    raise NotImplementedError('Implement in a child class.')
  def set_hyperparams(self, **kwargs):
    """ Replaces the hyper-parameter dictionary with kwargs. """
    self.hyperparams = kwargs
  def add_hyperparams(self, **kwargs):
    """ Adds (or overwrites) the given hyper-parameters, keeping existing ones. """
    # Bug fix: dict.iteritems() is Python-2 only and raises AttributeError on
    # Python 3; items() behaves identically on both versions.
    for key, value in kwargs.items():
      self.hyperparams[key] = value
  def get_effective_norm(self, X, order=None, *args, **kwargs):
    """ Gets the effective norm scaled by bandwidths. """
    raise NotImplementedError('Implement in a child class.')
  def compute_std_slack(self, X1, X2):
    """ Computes a bound on the maximum standard deviation between X1 and X2. """
    raise NotImplementedError('Implement in a child class.')
  def change_smoothness(self, factor):
    """ Decreases smoothness by the factor given. """
    raise NotImplementedError('Implement in a child class.')
class SEKernel(Kernel):
  """ Squared exponential kernel: k(x, y) = scale * exp(-||(x-y)/h||^2 / 2),
      where h is a per-dimension bandwidth vector. """
  def __init__(self, dim, scale=None, dim_bandwidths=None):
    """ Constructor. dim is the dimension; dim_bandwidths may be a scalar
        (shared across dimensions) or a length-dim list. """
    super(SEKernel, self).__init__()
    self.dim = dim
    self.set_se_hyperparams(scale, dim_bandwidths)
  def set_dim_bandwidths(self, dim_bandwidths):
    """ Sets the bandwidth for each dimension. """
    if dim_bandwidths is not None:
      if len(dim_bandwidths) != self.dim:
        raise ValueError('Dimension of dim_bandwidths should be the same as dimension.')
      dim_bandwidths = np.array(dim_bandwidths)
    self.add_hyperparams(dim_bandwidths=dim_bandwidths)
  def set_single_bandwidth(self, bandwidth):
    """ Sets the bandwidth of all dimensions to be the same value. """
    dim_bandwidths = None if bandwidth is None else [bandwidth] * self.dim
    self.set_dim_bandwidths(dim_bandwidths)
  def set_scale(self, scale):
    """ Sets the scale parameter for the kernel. """
    self.add_hyperparams(scale=scale)
  def set_se_hyperparams(self, scale, dim_bandwidths):
    """ Sets both the scale and the dimension bandwidths for the SE kernel. """
    self.set_scale(scale)
    if hasattr(dim_bandwidths, '__len__'):
      self.set_dim_bandwidths(dim_bandwidths)
    else:
      self.set_single_bandwidth(dim_bandwidths)
  def _child_evaluate(self, X1, X2):
    """ Evaluates the SE kernel between X1 and X2 and returns the gram matrix. """
    scaled_X1 = self.get_scaled_repr(X1)
    scaled_X2 = self.get_scaled_repr(X2)
    dist_sq = dist_squared(scaled_X1, scaled_X2)
    K = self.hyperparams['scale'] * np.exp(-dist_sq/2)
    return K
  def get_scaled_repr(self, X):
    """ Returns the input scaled (elementwise) by the bandwidths. """
    return X/self.hyperparams['dim_bandwidths']
  def get_effective_norm(self, X, order=None, is_single=True):
    """ Gets the effective norm, i.e. the norm of X scaled by the bandwidths.
        If is_single, X is one point; otherwise one norm per row is returned. """
    # pylint: disable=arguments-differ
    scaled_X = self.get_scaled_repr(X)
    if is_single:
      return np.linalg.norm(scaled_X, ord=order)
    else:
      return np.array([np.linalg.norm(sx, ord=order) for sx in scaled_X])
  def compute_std_slack(self, X1, X2):
    """ Computes a bound on the maximum standard deviation diff between X1 and X2. """
    # Bug fix: float() on the 1x1 gram matrix relies on array-to-scalar
    # conversion that is deprecated in recent numpy; .item() is equivalent.
    k_12 = np.array([self.evaluate(X1[i].reshape(1, -1), X2[i].reshape(1, -1)).item()
                     for i in range(len(X1))])
    return np.sqrt(self.hyperparams['scale'] - k_12)
  def change_smoothness(self, factor):
    """ Scales the bandwidths by the given factor. """
    self.hyperparams['dim_bandwidths'] *= factor
class PolyKernel(Kernel):
  """ The polynomial kernel: k(x, y) = scale * ((s*x)'(s*y) + 1)^order, where
      s is a per-dimension scaling vector. """
  # pylint: disable=abstract-method
  def __init__(self, dim, order, scale, dim_scalings=None):
    """ Constructor. """
    super(PolyKernel, self).__init__()
    self.dim = dim
    self.set_poly_hyperparams(order, scale, dim_scalings)
  def set_order(self, order):
    """ Sets the order of the polynomial. """
    self.add_hyperparams(order=order)
  def set_scale(self, scale):
    """ Sets the scale of the kernel. """
    self.add_hyperparams(scale=scale)
  def set_dim_scalings(self, dim_scalings):
    """ Sets the scaling for each dimension in the polynomial kernel. This is a
        dim dimensional vector.
    """
    if dim_scalings is not None:
      if len(dim_scalings) != self.dim:
        # Bug fix: the old message claimed "dim + 1" (copied from
        # UnscaledPolyKernel, which augments the input with a constant
        # coordinate); this kernel checks against dim itself.
        raise ValueError('Dimension of dim_scalings should be the same as dim.')
      dim_scalings = np.array(dim_scalings)
    self.add_hyperparams(dim_scalings=dim_scalings)
  def set_single_scaling(self, scaling):
    """ Sets the same scaling for all dimensions. """
    if scaling is None:
      self.set_dim_scalings(None)
    else:
      self.set_dim_scalings([scaling] * self.dim)
  def set_poly_hyperparams(self, order, scale, dim_scalings):
    """Sets the hyper parameters. """
    self.set_order(order)
    self.set_scale(scale)
    if hasattr(dim_scalings, '__len__'):
      self.set_dim_scalings(dim_scalings)
    else:
      self.set_single_scaling(dim_scalings)
  def _child_evaluate(self, X1, X2):
    """ Evaluates the polynomial kernel and returns the gram matrix. """
    X1 = X1 * self.hyperparams['dim_scalings']
    X2 = X2 * self.hyperparams['dim_scalings']
    K = self.hyperparams['scale'] * ((X1.dot(X2.T) + 1)**self.hyperparams['order'])
    return K
class UnscaledPolyKernel(Kernel):
  """ A polynomial kernel without a scale hyper-parameter: the input is
      homogenised with a constant coordinate, so the scalings vector has
      dim + 1 entries and k(x, y) = ((s*[1, x])'(s*[1, y]))^order. """
  # pylint: disable=abstract-method
  def __init__(self, dim, order, dim_scalings=None):
    """ Constructor. """
    super(UnscaledPolyKernel, self).__init__()
    self.dim = dim
    self.set_unscaled_poly_hyperparams(order, dim_scalings)
  def set_order(self, order):
    """ Sets the order of the polynomial. """
    self.add_hyperparams(order=order)
  def set_dim_scalings(self, dim_scalings):
    """ Sets the scaling for each dimension (plus the constant coordinate);
        a dim + 1 dimensional vector.
    """
    if dim_scalings is None:
      self.add_hyperparams(dim_scalings=None)
      return
    if len(dim_scalings) != self.dim + 1:
      raise ValueError('Dimension of dim_scalings should be dim + 1.')
    self.add_hyperparams(dim_scalings=np.array(dim_scalings))
  def set_single_scaling(self, scaling):
    """ Sets the same scaling for all dimensions. """
    if scaling is None:
      self.set_dim_scalings(None)
    else:
      self.set_dim_scalings([scaling] * (self.dim + 1))
  def set_unscaled_poly_hyperparams(self, order, dim_scalings):
    """Sets the hyper parameters. """
    self.set_order(order)
    if hasattr(dim_scalings, '__len__'):
      self.set_dim_scalings(dim_scalings)
    else:
      self.set_single_scaling(dim_scalings)
  def _child_evaluate(self, X1, X2):
    """ Evaluates the kernel on homogenised inputs and returns the gram matrix. """
    scalings = self.hyperparams['dim_scalings']
    aug_X1 = np.concatenate((np.ones((X1.shape[0], 1)), X1), axis=1) * scalings
    aug_X2 = np.concatenate((np.ones((X2.shape[0], 1)), X2), axis=1) * scalings
    return (aug_X1.dot(aug_X2.T))**self.hyperparams['order']
class CoordinateProductKernel(Kernel):
  """ Implements a coordinatewise product kernel. """
  # pylint: disable=abstract-method
  def __init__(self, dim, scale, kernel_list=None, coordinate_list=None):
    """ Constructor.
        kernel_list is a list of n Kernel objects. coordinate_list is a list of n lists
        each indicating the coordinates each kernel in kernel_list should be applied to.
    """
    super(CoordinateProductKernel, self).__init__()
    self.dim = dim
    self.scale = scale
    self.kernel_list = kernel_list
    self.coordinate_list = coordinate_list
  def set_kernel_list(self, kernel_list):
    """ Replaces the entire list of component kernels. """
    self.kernel_list = kernel_list
  def set_new_kernel(self, kernel_idx, new_kernel):
    """ Replaces the single component kernel at position kernel_idx. """
    self.kernel_list[kernel_idx] = new_kernel
  def set_kernel_hyperparams(self, kernel_idx, **kwargs):
    """ Forwards the hyper-parameters to the component kernel at kernel_idx. """
    self.kernel_list[kernel_idx].set_hyperparams(**kwargs)
  def _child_evaluate(self, X1, X2):
    """ Evaluates the combined kernel: scale times the elementwise product of
        each component kernel applied to its own coordinate subset. """
    gram = self.scale * np.ones((X1.shape[0], X2.shape[0]))
    for kern, coords in zip(self.kernel_list, self.coordinate_list):
      gram = gram * kern(X1[:, coords], X2[:, coords])
    return gram
| 9,318 | 33.772388 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/option_handler.py
|
"""
A harness to load options.
"""
# pylint: disable=star-args
import argparse
from copy import deepcopy
def get_option_specs(name, required=False, default=None, help_str='', **kwargs):
  """ A wrapper function to get a specification as a dictionary.
      Extra keyword arguments are merged in and may override the standard keys.
  """
  spec = {'name': name, 'required': required, 'default': default,
          'help': help_str}
  spec.update(kwargs)
  return spec
def _print_options(ondp, desc, reporter):
  """ Prints the options out via the reporter.
      ondp maps option name -> [default value, parsed value]; a '*' marks
      options whose parsed value differs from their default. Does nothing when
      reporter is None.
  """
  if reporter is None:
    return
  # An 80-column title bar, padded with dashes.
  title_str = 'Hyper-parameters for %s '%(desc)
  title_str = title_str + '-'*(80 - len(title_str))
  reporter.writeln(title_str)
  # Bug fix: dict.iteritems() is Python-2 only; items() works on both versions
  # and matches the usage in get_option_specs.
  for key, value in sorted(ondp.items()):
    is_changed_str = '*' if value[0] != value[1] else ' '
    reporter.writeln('  %s %s %s'%(key.ljust(30), is_changed_str, str(value[1])))
def load_options(list_of_options, descr='Algorithm', reporter=None, cmd_line_args=None):
  """ Given a list of option specifications (see get_option_specs), reads them
      from the command line and returns a namespace with the values.
      cmd_line_args, when given, is the explicit argument list to parse (useful
      for programmatic/test use); when None, argparse reads sys.argv as before.
  """
  parser = argparse.ArgumentParser(description=descr)
  opt_names_default_parsed = {}
  for elem in list_of_options:
    opt_dict = deepcopy(elem)   # don't mutate the caller's specification
    opt_name = opt_dict.pop('name')
    # Record [default, parsed] per option; parsed is filled in below.
    opt_names_default_parsed[opt_name] = [opt_dict['default'], None]
    if not opt_name.startswith('--'):
      opt_name = '--' + opt_name
    parser.add_argument(opt_name, **opt_dict)
  args = parser.parse_args(cmd_line_args)
  for key in opt_names_default_parsed:
    opt_names_default_parsed[key][1] = getattr(args, key)
  _print_options(opt_names_default_parsed, descr, reporter)
  return args
| 1,599 | 32.333333 | 81 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.