Columns: desc (docstring), decl (function declaration), bodies (function body).
'Parameters weight_init_std : specifies the standard deviation of the weights (e.g. 0.01). Passing \'relu\' or \'he\' selects the "He initialization"; passing \'sigmoid\' or \'xavier\' selects the "Xavier initialization".'
def __init_weight(self, weight_init_std):
    all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
    for idx in range(1, len(all_size_list)):
        scale = weight_init_std
        if str(weight_init_std).lower() in ('relu', 'he'):
            scale = np.sqrt(2.0 / all_size_list[idx - 1])
        elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
            scale = np.sqrt(1.0 / all_size_list[idx - 1])
        self.params['W' + str(idx)] = scale * np.random.randn(all_size_list[idx - 1], all_size_list[idx])
        self.params['b' + str(idx)] = np.zeros(all_size_list[idx])
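For intuition, a minimal self-contained sketch of the same scaling rule (NumPy only; the layer sizes are hypothetical): He initialization uses std = sqrt(2 / fan_in) and suits ReLU activations, Xavier uses std = sqrt(1 / fan_in) and suits sigmoid/tanh.

import numpy as np

def init_scale(weight_init_std, fan_in):
    # He initialization: std = sqrt(2 / fan_in), suited to ReLU activations.
    if str(weight_init_std).lower() in ('relu', 'he'):
        return np.sqrt(2.0 / fan_in)
    # Xavier initialization: std = sqrt(1 / fan_in), suited to sigmoid/tanh.
    if str(weight_init_std).lower() in ('sigmoid', 'xavier'):
        return np.sqrt(1.0 / fan_in)
    # Otherwise treat the argument as a literal standard deviation (e.g. 0.01).
    return float(weight_init_std)

# Hypothetical layer sizes: 784 inputs feeding a 100-unit hidden layer.
W = init_scale('he', 784) * np.random.randn(784, 100)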
'The argument x is the input data; t is the target (teacher) labels.'
def loss(self, x, t, train_flg=False):
    y = self.predict(x, train_flg)
    weight_decay = 0
    for idx in range(1, self.hidden_layer_num + 2):
        W = self.params['W' + str(idx)]
        weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W ** 2)
    return self.last_layer.forward(y, t) + weight_decay
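The regularization term above is a standard L2 weight decay, 0.5 * lambda * sum(W ** 2) accumulated over every weight matrix. A minimal standalone sketch (NumPy only; the lambda value and weight shapes are hypothetical):

import numpy as np

weight_decay_lambda = 0.1  # hypothetical regularization strength
params = {'W1': np.random.randn(4, 3), 'W2': np.random.randn(3, 2)}

# L2 penalty over all weight matrices, matching the loop above.
weight_decay = sum(0.5 * weight_decay_lambda * np.sum(W ** 2) for W in params.values())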
'Parameters X : input data T : target labels Returns grads[\'W1\'], grads[\'W2\'], ... hold the weights of each layer; grads[\'b1\'], grads[\'b2\'], ... hold the biases of each layer.'
def numerical_gradient(self, X, T):
    loss_W = lambda W: self.loss(X, T, train_flg=True)
    grads = {}
    for idx in range(1, self.hidden_layer_num + 2):
        grads['W' + str(idx)] = numerical_gradient(loss_W, self.params['W' + str(idx)])
        grads['b' + str(idx)] = numerical_gradient(loss_W, self.params['b' + str(idx)])
        if self.use_batchnorm and idx != self.hidden_layer_num + 1:
            grads['gamma' + str(idx)] = numerical_gradient(loss_W, self.params['gamma' + str(idx)])
            grads['beta' + str(idx)] = numerical_gradient(loss_W, self.params['beta' + str(idx)])
    return grads
'The argument x is the input data; t is the target (teacher) labels.'
def loss(self, x, t):
    y = self.predict(x)
    return self.last_layer.forward(y, t)
'Parameters x : input data t : target labels Returns grads[\'W1\'], grads[\'W2\'], ... hold the weights of each layer; grads[\'b1\'], grads[\'b2\'], ... hold the biases of each layer.'
def numerical_gradient(self, x, t):
    loss_w = lambda w: self.loss(x, t)
    grads = {}
    for idx in (1, 2, 3):
        grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])
        grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])
    return grads
'Parameters x : input data t : target labels Returns grads[\'W1\'], grads[\'W2\'], ... hold the weights of each layer; grads[\'b1\'], grads[\'b2\'], ... hold the biases of each layer.'
def gradient(self, x, t):
    self.loss(x, t)
    dout = 1
    dout = self.last_layer.backward(dout)
    layers = list(self.layers.values())
    layers.reverse()
    for layer in layers:
        dout = layer.backward(dout)
    grads = {}
    grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db
    grads['W2'], grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
    grads['W3'], grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
    return grads
'Start the server.'
def start(self):
    plugins = voltron.plugin.pm.web_plugins
    self.app = DispatcherMiddleware(RootFlaskApp(), {
        '/api': APIFlaskApp(server=self),
        '/view': SharedDataMiddleware(None, {'/{}'.format(n): os.path.join(p._dir, 'static')
                                             for (n, p) in six.iteritems(plugins)}),
        '/ui': ui_app,
    })

    def run_listener(name, cls, arg):
        log.debug('Starting listener for {} socket on {}'.format(name, str(arg)))
        s = cls(*arg)
        t = threading.Thread(target=s.serve_forever)
        t.daemon = True
        t.start()
        self.threads.append(t)
        self.listeners.append(s)

    if voltron.config.server.listen.tcp:
        run_listener('tcp', ThreadedVoltronWSGIServer, list(voltron.config.server.listen.tcp) + [self.app])

    if voltron.config.server.listen.domain and sys.platform != 'win32':
        path = os.path.expanduser(str(voltron.config.server.listen.domain))
        try:
            os.unlink(path)
        except:
            pass
        run_listener('domain', ThreadedUnixWSGIServer, [path, self.app])

    self.is_running = True
'Stop the server.'
def stop(self):
    log.debug('Stopping listeners')
    self.queue_lock.acquire()
    for s in self.listeners:
        log.debug('Stopping {}'.format(s))
        s.shutdown()
        s.socket.close()
    self.cancel_queue()
    for t in self.threads:
        t.join()
    self.listeners = []
    self.threads = []
    self.is_running = False
    self.queue_lock.release()
    log.debug('Listeners stopped and threads joined')
'Cancel all requests in the queue so we can exit.'
def cancel_queue(self):
    q = list(self.queue)
    self.queue = []
    log.debug('Canceling requests: {}'.format(q))
    for req in q:
        req.response = APIServerNotRunningErrorResponse()
    for req in q:
        req.signal()
'Dispatch any queued requests. Called by the debugger when it stops.'
def dispatch_queue(self):
    self.queue_lock.acquire()
    q = list(self.queue)
    self.queue = []
    self.queue_lock.release()
    log.debug('Dispatching requests: {}'.format(q))
    for req in q:
        req.response = self.dispatch_request(req)
    for req in q:
        req.signal()
'Dispatch a request object.'
def dispatch_request(self, req):
    log.debug('Dispatching request: {}'.format(str(req)))
    res = None
    try:
        req.validate()
    except MissingFieldError as e:
        res = APIMissingFieldErrorResponse(str(e))
    if not res:
        try:
            res = req.dispatch()
        except Exception as e:
            msg = 'Exception raised while dispatching request: {}'.format(repr(e))
            log.exception(msg)
            res = APIGenericErrorResponse(msg)
    log.debug('Response: {}'.format(str(res)))
    return res
'Initialise a new client'
def __init__(self, host='127.0.0.1', port=5555, sockfile=None, url=None, build_requests=None, callback=None, supports_blocking=True):
    self.session = requests.Session()
    if url:
        self.url = url
    elif sockfile:
        self.url = 'http+unix://{}/api/request'.format(sockfile.replace('/', '%2F'))
    elif voltron.config.view.api_url:
        self.url = voltron.config.view.api_url
    else:
        self.url = 'http://{}:{}/api/request'.format(host, port)
    self.url = self.url.replace('~', os.path.expanduser('~').replace('/', '%2f'))
    self.callback = callback
    self.build_requests = build_requests
    self.done = False
    self.server_version = None
    self.block = False
    self.supports_blocking = supports_blocking
'Send a request to the server. `request` is an APIRequest subclass. Returns an APIResponse or subclass instance. If an error occurred it will be an APIErrorResponse; if the request was successful it will be the plugin\'s specified response class if one exists, otherwise an APIResponse.'
def send_request(self, request):
    res = APIEmptyResponseErrorResponse()
    log.debug('Client sending request: ' + str(request))
    response = self.session.post(self.url, data=str(request))
    data = response.text
    if response.status_code != 200:
        res = APIGenericErrorResponse(response.text)
    elif data and len(data) > 0:
        log.debug('Client received message: ' + data)
        try:
            generic_response = APIResponse(data=data)
            if generic_response.is_error:
                res = APIErrorResponse(data=data)
            else:
                plugin = voltron.plugin.pm.api_plugin_for_request(request.request)
                if plugin and plugin.response_class:
                    res = plugin.response_class(data=data)
                else:
                    res = generic_response
        except Exception as e:
            log.exception('Exception parsing message: ' + str(e))
            log.error('Invalid message: ' + data)
    else:
        res = APIEmptyResponseErrorResponse()
    return res
'Send a set of requests. Each request is sent over its own connection and the function will return when all the requests have been fulfilled.'
def send_requests(self, *args):
    threads = [ClientThread(self, req) for req in args]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    exceptions = [t.exception for t in threads if t.exception]
    if len(exceptions):
        raise exceptions[0]
    return [t.response for t in threads]
'Create a request. `request_type` is the request type (string). This is used to look up a plugin, whose request class is instantiated and passed the remaining arguments passed to this function.'
def create_request(self, request_type, *args, **kwargs):
return api_request(request_type, *args, **kwargs)
'Create and send a request. `request_type` is the request type (string). This is used to look up a plugin, whose request class is instantiated and passed the remaining arguments passed to this function.'
def perform_request(self, request_type, *args, **kwargs):
    req = api_request(request_type, *args, **kwargs)
    res = self.send_request(req)
    return res
'Update the display'
def update(self):
    reqs = self.build_requests()
    for r in reqs:
        r.block = self.block
    results = self.send_requests(*reqs)
    self.callback(results)
'Run the client in a loop, calling the callback each time the debugger stops.'
def run(self, build_requests=None, callback=None):
    if callback:
        self.callback = callback
    if build_requests:
        self.build_requests = build_requests

    def normalise_requests_err(e):
        try:
            msg = e.message.args[1].strerror
        except:
            try:
                msg = e.message.args[0]
            except:
                msg = str(e)
        return msg

    while not self.done:
        try:
            if not self.server_version:
                self.server_version = self.perform_request('version')
                if self.server_version.capabilities and 'async' in self.server_version.capabilities:
                    self.update()
                    self.block = False
                elif self.supports_blocking:
                    self.block = True
                else:
                    raise BlockingNotSupportedError('Debugger requires blocking mode')
            if self.block:
                self.update()
            else:
                res = self.perform_request('null', block=True)
                if res.is_success:
                    self.server_version = res
                    self.update()
        except ConnectionError as e:
            self.callback(error='Error: {}'.format(normalise_requests_err(e)))
            self.server_version = None
            time.sleep(1)
'Run the client using a background thread.'
def start(self, build_requests=None, callback=None):
    if callback:
        self.callback = callback
    if build_requests:
        self.build_requests = build_requests
    self.sw = threading.Thread(target=self.run)
    self.sw.start()
'Stop the background thread.'
def stop(self):
self.done = True
'Build requests for this view. Concrete view subclasses must implement this.'
def build_requests(self):
return []
'Run the view event loop.'
def run(self):
    def render(results=[], error=None):
        if len(results) and not results[0].timed_out:
            self.render(results)
        elif error:
            self.do_render(error=error)

    self.client.start(self.build_requests, render)
    try:
        with self.t.cbreak():
            val = ''
            while not self.done:
                val = self.t.inkey(timeout=1)
                if val:
                    self.handle_key(val)
    except KeyboardInterrupt:
        self.exit()
'Handle a keypress. Concrete subclasses can implement this method if custom keypresses need to be handled other than for exit and scrolling.'
def handle_key(self, key):
    try:
        func = None
        if key.is_sequence:
            try:
                func = self.config.keymap[key.name]
            except:
                try:
                    func = self.config.keymap[key.code]
                except:
                    func = self.config.keymap[str(key)]
        else:
            func = self.config.keymap[str(key)]
        if func in self.valid_key_funcs:
            getattr(self, func)()
    except:
        raise
'Returns True or False indicating whether or not the specified target is present and valid. `target_id` is a target ID (or None for the first target)'
def target_exists(self, target_id=0):
    try:
        target = self._target(target_id=target_id)
    except Exception as e:
        log.error('Exception checking if target exists: {} {}'.format(type(e), e))
        return False
    return target is not None
'Returns True or False indicating whether or not the specified target is present and valid. `target_id` is a target ID (or None for the first target)'
def target_is_valid(self, target_id=0):
    try:
        target = self._target(target_id=target_id)
    except:
        return False
    return target['state'] != 'invalid'
'Returns True or False indicating whether or not the specified target is busy. `target_id` is a target ID (or None for the first target)'
def target_is_busy(self, target_id=0):
    try:
        target = self._target(target_id=target_id)
    except:
        raise NoSuchTargetException()
    return target['state'] == 'running'
'Add a listener for state changes.'
def add_listener(self, callback, state_changes=['stopped']):
self.listeners.append({'callback': callback, 'state_changes': state_changes})
'Remove a listener.'
def remove_listener(self, callback):
    # Materialise the matches before mutating self.listeners (a lazy filter would be
    # invalidated by the removals below).
    listeners = [l for l in self.listeners if l['callback'] == callback]
    for l in listeners:
        self.listeners.remove(l)
'Notify all the listeners (probably `wait` plugins) that the state has changed. This is called by the debugger\'s stop-hook.'
def update_state(self):
    for listener in self.listeners:
        listener['callback']()
'Return a list of the debugger\'s capabilities. Thus far only the \'async\' capability is supported. This indicates that the debugger host can be queried from a background thread, and that views can use non-blocking API requests without queueing requests to be dispatched next time the debugger stops.'
def capabilities(self):
return []
'Disassemble with capstone.'
def disassemble_capstone(self, target_id=0, address=None, count=None):
    target = self._target(target_id)
    if not address:
        pc_name, address = self.pc()
    mem = self.memory(address, count * 16, target_id=target_id)
    md = capstone.Cs(*self.cs_archs[target['arch']])
    output = []
    for idx, i in enumerate(md.disasm(mem, address)):
        if idx >= count:
            break
        output.append('0x%x:\t%s\t%s' % (i.address, i.mnemonic, i.op_str))
    return '\n'.join(output)
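For reference, a self-contained Capstone call on a fixed byte string (the x86-64 mode and the bytes are chosen purely for illustration; they are the usual Capstone example bytes):

import capstone

CODE = b"\x55\x48\x8b\x05\xb8\x13\x00\x00"  # push rbp; mov rax, qword ptr [rip + 0x13b8]
md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
for insn in md.disasm(CODE, 0x1000):
    # Each instruction exposes .address, .mnemonic and .op_str, as used above.
    print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))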
'Return a string (JSON) representation of the API message properties.'
def __str__(self):
return self.to_json()
'Return a transmission-safe dictionary representation of the API message properties.'
def to_dict(self):
    d = {field: getattr(self, field) for field in self._top_fields if hasattr(self, field)}
    d['data'] = {}
    for field in self._fields:
        if hasattr(self, field):
            if field in self._encode_fields:
                val = getattr(self, field)
                if val:
                    val = cast_s(base64.b64encode(cast_b(val)))
                d['data'][field] = val
            else:
                d['data'][field] = getattr(self, field)
    return d
'Initialise an API message from a transmission-safe dictionary.'
def from_dict(self, d):
    for key in d:
        if key == 'data':
            for dkey in d['data']:
                if dkey in self._encode_fields:
                    setattr(self, str(dkey), base64.b64decode(d['data'][dkey]))
                else:
                    setattr(self, str(dkey), d['data'][dkey])
        else:
            setattr(self, str(key), d[key])
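Fields listed in _encode_fields are base64-encoded by to_dict and decoded again here, so binary payloads survive JSON transport. A minimal round-trip illustration (standard library only; the field name 'memory' is hypothetical):

import base64
import json

payload = b"\x00\x01binary"  # hypothetical value of an encoded field
wire = json.dumps({'data': {'memory': base64.b64encode(payload).decode('ascii')}})
restored = base64.b64decode(json.loads(wire)['data']['memory'])
assert restored == payload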
'Return a JSON representation of the API message properties.'
def to_json(self):
return json.dumps(self.to_dict())
'Initialise an API message from a JSON representation.'
def from_json(self, data):
    try:
        d = json.loads(data)
    except ValueError:
        raise InvalidMessageException()
    self.from_dict(d)
'Attribute accessor. If a defined field is requested that doesn\'t have a value set, return None.'
def __getattr__(self, name):
    if name in self._fields:
        return None
'Validate the message. Ensure all the required fields are present and not None.'
def validate(self):
    required_fields = list(filter(lambda x: self._fields[x], self._fields.keys()))
    for field in self._top_fields + required_fields:
        if not hasattr(self, field) or getattr(self, field) is None:
            raise MissingFieldError(field)
'In concrete subclasses this method will actually dispatch the request to the debugger host and return a response. In this case it raises an exception.'
@server_side
def dispatch(self):
raise NotImplementedError('Subclass APIRequest')
'Wait for the request to be dispatched.'
@server_side
def wait(self):
    self.wait_event = threading.Event()
    timeout = int(self.timeout) if self.timeout else None
    self.timed_out = not self.wait_event.wait(timeout)
'Signal that the request has been dispatched and can return.'
def signal(self):
self.wait_event.set()
'Initialise a new PluginManager.'
def __init__(self):
    self._api_plugins = defaultdict(lambda: None)
    self._debugger_plugins = defaultdict(lambda: None)
    self._view_plugins = defaultdict(lambda: None)
    self._web_plugins = defaultdict(lambda: None)
    self._command_plugins = defaultdict(lambda: None)
'Register a new plugin with the PluginManager. `plugin` is a subclass of scruffy\'s Plugin class. This is called by __init__(), but may also be called by the debugger host to load a specific plugin at runtime.'
def register_plugin(self, plugin):
    if hasattr(plugin, 'initialise'):
        plugin.initialise()
    if self.valid_api_plugin(plugin):
        log.debug('Registering API plugin: {}'.format(plugin))
        self._api_plugins[plugin.request] = plugin()
    elif self.valid_debugger_plugin(plugin):
        log.debug('Registering debugger plugin: {}'.format(plugin))
        self._debugger_plugins[plugin.host] = plugin()
    elif self.valid_view_plugin(plugin):
        log.debug('Registering view plugin: {}'.format(plugin))
        self._view_plugins[plugin.name] = plugin()
    elif self.valid_web_plugin(plugin):
        log.debug('Registering web plugin: {}'.format(plugin))
        self._web_plugins[plugin.name] = plugin()
    elif self.valid_command_plugin(plugin):
        log.debug('Registering command plugin: {}'.format(plugin))
        self._command_plugins[plugin.name] = plugin()
        if voltron.debugger:
            voltron.debugger.register_command_plugin(plugin.name, plugin.command_class)
    else:
        log.debug('Ignoring invalid plugin: {}'.format(plugin))
'Validate an API plugin, ensuring it is an API plugin and has the necessary fields present. `plugin` is a subclass of scruffy\'s Plugin class.'
def valid_api_plugin(self, plugin):
    if (issubclass(plugin, APIPlugin)
            and hasattr(plugin, 'plugin_type') and plugin.plugin_type == 'api'
            and hasattr(plugin, 'request') and plugin.request is not None
            and hasattr(plugin, 'request_class') and plugin.request_class is not None
            and hasattr(plugin, 'response_class') and plugin.response_class is not None):
        return True
    return False
'Validate a debugger plugin, ensuring it is a debugger plugin and has the necessary fields present. `plugin` is a subclass of scruffy\'s Plugin class.'
def valid_debugger_plugin(self, plugin):
    if (issubclass(plugin, DebuggerAdaptorPlugin)
            and hasattr(plugin, 'plugin_type') and plugin.plugin_type == 'debugger'
            and hasattr(plugin, 'host') and plugin.host is not None):
        return True
    return False
'Validate a view plugin, ensuring it is a view plugin and has the necessary fields present. `plugin` is a subclass of scruffy\'s Plugin class.'
def valid_view_plugin(self, plugin):
    if (issubclass(plugin, ViewPlugin)
            and hasattr(plugin, 'plugin_type') and plugin.plugin_type == 'view'
            and hasattr(plugin, 'name') and plugin.name is not None
            and hasattr(plugin, 'view_class') and plugin.view_class is not None):
        return True
    return False
'Validate a web plugin, ensuring it is a web plugin and has the necessary fields present. `plugin` is a subclass of scruffy\'s Plugin class.'
def valid_web_plugin(self, plugin):
    if (issubclass(plugin, WebPlugin)
            and hasattr(plugin, 'plugin_type') and plugin.plugin_type == 'web'
            and hasattr(plugin, 'name') and plugin.name is not None):
        return True
    return False
'Validate a command plugin, ensuring it is a command plugin and has the necessary fields present. `plugin` is a subclass of scruffy\'s Plugin class.'
def valid_command_plugin(self, plugin):
    if (issubclass(plugin, CommandPlugin)
            and hasattr(plugin, 'plugin_type') and plugin.plugin_type == 'command'
            and hasattr(plugin, 'name') and plugin.name is not None):
        return True
    return False
'Find an API plugin that supports the given request type.'
def api_plugin_for_request(self, request=None):
return self.api_plugins[request]
'Find a debugger plugin that supports the debugger host.'
def debugger_plugin_for_host(self, host=None):
return self.debugger_plugins[host]
'Find a view plugin for the given view name.'
def view_plugin_with_name(self, name=None):
return self.view_plugins[name]
'Find a web plugin for the given name.'
def web_plugin_with_name(self, name=None):
return self.web_plugins[name]
'Find a command plugin for the given name.'
def command_plugin_with_name(self, name=None):
return self.command_plugins[name]
'A t-Distributed Stochastic Neighbor Embedding implementation. Parameters max_iter : int, default 200 perplexity : float, default 30.0 n_components : int, default 2'
def __init__(self, n_components=2, perplexity=30.0, max_iter=200, learning_rate=500):
    self.max_iter = max_iter
    self.perplexity = perplexity
    self.n_components = n_components
    self.initial_momentum = 0.5
    self.final_momentum = 0.8
    self.min_gain = 0.01
    self.lr = learning_rate
    self.tol = 1e-05
    self.perplexity_tries = 50
'Computes pairwise affinities.'
def _get_pairwise_affinities(self, X):
    affines = np.zeros((self.n_samples, self.n_samples), dtype=np.float32)
    target_entropy = np.log(self.perplexity)
    distances = l2_distance(X)
    for i in range(self.n_samples):
        affines[i, :] = self._binary_search(distances[i], target_entropy)
    np.fill_diagonal(affines, 1e-12)
    affines = affines.clip(min=1e-100)
    affines = (affines + affines.T) / (2 * self.n_samples)
    return affines
'Performs binary search to find suitable precision.'
def _binary_search(self, dist, target_entropy):
    precision_min = 0
    precision_max = 1e15
    precision = 1e5
    for _ in range(self.perplexity_tries):
        denom = np.sum(np.exp(-dist[dist > 0.0] / precision))
        beta = np.exp(-dist / precision) / denom
        g_beta = beta[beta > 0.0]
        entropy = -np.sum(g_beta * np.log2(g_beta))
        error = entropy - target_entropy
        if error > 0:
            precision_max = precision
            precision = (precision + precision_min) / 2.0
        else:
            precision_min = precision
            precision = (precision + precision_max) / 2.0
        if np.abs(error) < self.tol:
            break
    return beta
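The loop is a plain bisection on the precision: if the entropy under the current precision overshoots the target the precision is lowered, otherwise raised, halving the bracket each time. The same pattern in isolation (the function and target here are hypothetical, just to show the bracket updates):

lo, hi, x = 0.0, 1e15, 1e5
target = 3.0
f = lambda p: p ** 0.5 / 100.0  # hypothetical monotone function of the precision

for _ in range(50):
    err = f(x) - target
    if abs(err) < 1e-5:
        break
    if err > 0:
        hi = x                   # overshoot: shrink towards the lower bound
        x = (x + lo) / 2.0
    else:
        lo = x                   # undershoot: grow towards the upper bound
        x = (x + hi) / 2.0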
'Computes Student t-distribution.'
def _q_distribution(self, D):
    Q = 1.0 / (1.0 + D)
    np.fill_diagonal(Q, 0.0)
    Q = Q.clip(min=1e-100)
    return Q
'Predict log likelihood for given row.'
def _predict_row(self, x):
    output = []
    for y in range(self.n_classes):
        prior = np.log(self._priors[y])
        posterior = np.log(self._pdf(y, x)).sum()
        prediction = prior + posterior
        output.append(prediction)
    return output
'Calculate Gaussian PDF for each feature.'
def _pdf(self, n_class, x):
    mean = self._mean[n_class]
    var = self._var[n_class]
    numerator = np.exp(-(x - mean) ** 2 / (2 * var))
    denominator = np.sqrt(2 * np.pi * var)
    return numerator / denominator
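The expression is the univariate Gaussian density exp(-(x - mean)^2 / (2 * var)) / sqrt(2 * pi * var). A quick cross-check against SciPy (assuming scipy is available; the values are hypothetical):

import numpy as np
from scipy import stats

x, mean, var = 1.3, 1.0, 0.25
manual = np.exp(-(x - mean) ** 2 / (2 * var)) / np.sqrt(2 * np.pi * var)
# norm.pdf takes the standard deviation, hence the sqrt of the variance.
assert np.isclose(manual, stats.norm.pdf(x, loc=mean, scale=np.sqrt(var)))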
'Deep Q learning implementation. Parameters min_epsilon : float Minimal value for epsilon. epsilon : float ε-greedy value. decay : float Epsilon decay rate. memory_limit : int Limit of experience replay memory.'
def __init__(self, n_episodes=500, gamma=0.99, batch_size=32, epsilon=1.0, decay=0.005, min_epsilon=0.1, memory_limit=500):
    self.memory_limit = memory_limit
    self.min_epsilon = min_epsilon
    self.gamma = gamma
    self.epsilon = epsilon
    self.n_episodes = n_episodes
    self.batch_size = batch_size
    self.decay = decay
'Bernoulli Restricted Boltzmann Machine (RBM) Parameters n_hidden : int, default 128 The number of hidden units. learning_rate : float, default 0.1 batch_size : int, default 10 max_epochs : int, default 100'
def __init__(self, n_hidden=128, learning_rate=0.1, batch_size=10, max_epochs=100):
    self.max_epochs = max_epochs
    self.batch_size = batch_size
    self.lr = learning_rate
    self.n_hidden = n_hidden
'Use the CD-1 training procedure: essentially exact inference for the `positive_associations`, followed by a "non burn-in" block Gibbs sampling step for the `negative_associations`.'
def _train(self):
    for i in range(self.max_epochs):
        error = 0
        for batch in batch_iterator(self.X, batch_size=self.batch_size):
            positive_hidden = sigmoid(np.dot(batch, self.W) + self.bias_h)
            hidden_states = self._sample(positive_hidden)
            positive_associations = np.dot(batch.T, positive_hidden)

            negative_visible = sigmoid(np.dot(hidden_states, self.W.T) + self.bias_v)
            negative_visible = self._sample(negative_visible)
            negative_hidden = sigmoid(np.dot(negative_visible, self.W) + self.bias_h)
            negative_associations = np.dot(negative_visible.T, negative_hidden)

            lr = self.lr / float(batch.shape[0])
            self.W += lr * ((positive_associations - negative_associations) / float(self.batch_size))
            self.bias_h += lr * (negative_hidden.sum(axis=0) - negative_associations.sum(axis=0))
            self.bias_v += lr * (np.asarray(batch.sum(axis=0)).squeeze() - negative_visible.sum(axis=0))

            error += np.sum((batch - negative_visible) ** 2)
        self.errors.append(error)
        logging.info('Iteration %s, error %s' % (i, error))
    logging.debug('Weights: %s' % self.W)
    logging.debug('Hidden bias: %s' % self.bias_h)
    logging.debug('Visible bias: %s' % self.bias_v)
'Support vector machines implementation using simplified SMO optimization. Parameters C : float, default 1.0 kernel : Kernel object tol : float , default 1e-3 max_iter : int, default 100'
def __init__(self, C=1.0, kernel=None, tol=0.001, max_iter=100):
    self.C = C
    self.tol = tol
    self.max_iter = max_iter
    if kernel is None:
        self.kernel = Linear()
    else:
        self.kernel = kernel
    self.b = 0
    self.alpha = None
    self.K = None
'Error for single example.'
def _error(self, i):
return (self._predict_row(self.X[i]) - self.y[i])
'Find L and H such that L <= alpha <= H. Also, alpha must satisfy the constraint 0 <= alpha <= C.'
def _find_bounds(self, i, j):
    if self.y[i] != self.y[j]:
        L = max(0, self.alpha[j] - self.alpha[i])
        H = min(self.C, self.C - self.alpha[i] + self.alpha[j])
    else:
        L = max(0, self.alpha[i] + self.alpha[j] - self.C)
        H = min(self.C, self.alpha[i] + self.alpha[j])
    return L, H
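A quick worked example of the clipping bounds (the values are hypothetical):

C = 1.0
alpha_i, alpha_j = 0.3, 0.6  # hypothetical Lagrange multipliers

# Opposite labels (y_i != y_j): the pair moves along alpha_j - alpha_i = const.
L, H = max(0, alpha_j - alpha_i), min(C, C - alpha_i + alpha_j)   # -> (0.3, 1.0)

# Equal labels (y_i == y_j): the pair moves along alpha_i + alpha_j = const.
L, H = max(0, alpha_i + alpha_j - C), min(C, alpha_i + alpha_j)   # -> (0.0, 0.9)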
'Base class for RandomForest. Parameters n_estimators : int The number of decision trees. max_features : int The number of features to consider when looking for the best split. min_samples_split : int The minimum number of samples required to split an internal node. max_depth : int Maximum depth of the tree. criterion : str The function to measure the quality of a split.'
def __init__(self, n_estimators=10, max_features=None, min_samples_split=10, max_depth=None, criterion=None):
    self.max_depth = max_depth
    self.min_samples_split = min_samples_split
    self.max_features = max_features
    self.n_estimators = n_estimators
    self.trees = []
'First order gradient.'
def grad(self, actual, predicted):
raise NotImplementedError()
'Second order gradient.'
def hess(self, actual, predicted):
raise NotImplementedError()
'Approximate leaf value.'
def approximate(self, actual, predicted):
return (self.grad(actual, predicted).sum() / (self.hess(actual, predicted).sum() + self.regularization))
'Transform predictions values.'
def transform(self, pred):
return pred
'Calculate gain for split search.'
def gain(self, actual, predicted):
    nominator = self.grad(actual, predicted).sum() ** 2
    denominator = self.hess(actual, predicted).sum() + self.regularization
    return 0.5 * (nominator / denominator)
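Under a squared-error loss (an assumption for illustration: grad = predicted - actual and hess = 1 per example), the node statistics used by approximate() and gain() reduce to simple sums:

import numpy as np

actual = np.array([1.0, 0.0, 1.0])
predicted = np.array([0.4, 0.2, 0.9])
regularization = 1.0

G = (predicted - actual).sum()       # first-order statistic for squared error
H = np.ones_like(actual).sum()       # second-order statistic (constant 1 per row)

leaf_value = G / (H + regularization)            # mirrors approximate()
split_score = 0.5 * (G ** 2) / (H + regularization)  # mirrors gain()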
'Find all possible split values.'
def _find_splits(self, X):
    split_values = set()
    x_unique = list(np.unique(X))
    for i in range(1, len(x_unique)):
        average = (x_unique[i - 1] + x_unique[i]) / 2.0
        split_values.add(average)
    return list(split_values)
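For example, with a hypothetical feature column the candidate thresholds are the midpoints between consecutive unique values:

import numpy as np

X_column = np.array([3.0, 1.0, 2.0, 2.0])
x_unique = list(np.unique(X_column))                        # [1.0, 2.0, 3.0]
splits = [(a + b) / 2.0 for a, b in zip(x_unique, x_unique[1:])]
# splits == [1.5, 2.5]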
'Find best feature and value for a split. Greedy algorithm.'
def _find_best_split(self, X, target, n_features):
    subset = random.sample(list(range(0, X.shape[1])), n_features)
    max_gain, max_col, max_val = None, None, None
    for column in subset:
        split_values = self._find_splits(X[:, column])
        for value in split_values:
            if self.loss is None:
                splits = split(X[:, column], target['y'], value)
                gain = self.criterion(target['y'], splits)
            else:
                left, right = split_dataset(X, target, column, value, return_X=False)
                gain = xgb_criterion(target, left, right, self.loss)
            if max_gain is None or gain > max_gain:
                max_col, max_val, max_gain = column, value, gain
    return max_col, max_val, max_gain
'Build a decision tree from training set. Parameters X : array-like Feature dataset. target : dictionary or array-like Target values. max_features : int or None The number of features to consider when looking for the best split. min_samples_split : int The minimum number of samples required to split an internal node. max_depth : int Maximum depth of the tree. minimum_gain : float, default 0.01 Minimum gain required for splitting. loss : function, default None Loss function for gradient boosting.'
def train(self, X, target, max_features=None, min_samples_split=10, max_depth=None, minimum_gain=0.01, loss=None):
    if not isinstance(target, dict):
        target = {'y': target}

    if loss is not None:
        self.loss = loss

    try:
        assert X.shape[0] > min_samples_split
        assert max_depth > 0

        if max_features is None:
            max_features = X.shape[1]

        column, value, gain = self._find_best_split(X, target, max_features)
        assert gain is not None
        if self.regression:
            assert gain != 0
        else:
            assert gain > minimum_gain

        self.column_index = column
        self.threshold = value
        self.impurity = gain

        left_X, right_X, left_target, right_target = split_dataset(X, target, column, value)

        self.left_child = Tree(self.regression, self.criterion)
        self.left_child.train(left_X, left_target, max_features, min_samples_split,
                              max_depth - 1, minimum_gain, loss)

        self.right_child = Tree(self.regression, self.criterion)
        self.right_child.train(right_X, right_target, max_features, min_samples_split,
                               max_depth - 1, minimum_gain, loss)
    except AssertionError:
        self._calculate_leaf_value(target)
'Find optimal value for leaf.'
def _calculate_leaf_value(self, targets):
    if self.loss is not None:
        self.outcome = self.loss.approximate(targets['actual'], targets['y_pred'])
    elif self.regression:
        self.outcome = np.mean(targets['y'])
    else:
        self.outcome = stats.itemfreq(targets['y'])[:, 1] / float(targets['y'].shape[0])
'Predict single row.'
def predict_row(self, row):
    if not self.is_terminal:
        if row[self.column_index] < self.threshold:
            return self.left_child.predict_row(row)
        else:
            return self.right_child.predict_row(row)
    return self.outcome
'Ensure inputs to an estimator are in the expected format. Ensures X and y are stored as numpy ndarrays by converting from an array-like object if necessary. Enables estimators to define whether they require a set of y target values or not with y_required, e.g. kmeans clustering requires no target labels and is fit against only X. Parameters X : array-like Feature dataset. y : array-like Target values. By default is required, but if y_required = false then may be omitted.'
def _setup_input(self, X, y=None):
    if not isinstance(X, np.ndarray):
        X = np.array(X)

    if X.size == 0:
        raise ValueError('Number of features must be > 0')

    if X.ndim == 1:
        self.n_samples, self.n_features = 1, X.shape
    else:
        self.n_samples, self.n_features = X.shape[0], np.prod(X.shape[1:])

    self.X = X

    if self.y_required:
        if y is None:
            raise ValueError('Missed required argument y')
        if not isinstance(y, np.ndarray):
            y = np.array(y)
        if y.size == 0:
            raise ValueError('Number of targets must be > 0')

    self.y = y
'Set the initial centroids.'
def _initialize_centroids(self, init):
    if init == 'random':
        self.centroids = [self.X[x] for x in random.sample(range(self.n_samples), self.K)]
    elif init == '++':
        self.centroids = [random.choice(self.X)]
        while len(self.centroids) < self.K:
            self.centroids.append(self._choose_next_center())
    else:
        raise ValueError('Unknown type of init parameter')
'Perform clustering on the dataset.'
def _predict(self, X=None):
    self._initialize_centroids(self.init)
    centroids = self.centroids
    for _ in range(self.max_iters):
        self._assign(centroids)
        centroids_old = centroids
        centroids = [self._get_centroid(cluster) for cluster in self.clusters]
        if self._is_converged(centroids_old, centroids):
            break
    self.centroids = centroids
    return self._get_predictions()
'Find the closest centroid for a point.'
def _closest(self, fpoint, centroids):
    closest_index = None
    closest_distance = None
    for i, point in enumerate(centroids):
        dist = euclidean_distance(self.X[fpoint], point)
        if closest_index is None or dist < closest_distance:
            closest_index = i
            closest_distance = dist
    return closest_index
'Get values by indices and take the mean.'
def _get_centroid(self, cluster):
return [np.mean(np.take(self.X[:, i], cluster)) for i in range(self.n_features)]
'Calculate distance from centers.'
def _dist_from_centers(self):
return np.array([min([euclidean_distance(x, c) for c in self.centroids]) for x in self.X])
'Check if the distance between old and new centroids is zero.'
def _is_converged(self, centroids_old, centroids):
    distance = 0
    for i in range(self.K):
        distance += euclidean_distance(centroids_old[i], centroids[i])
    return distance == 0
'Basic class for implementing continuous regression estimators which are trained with gradient descent optimization on their particular loss function. Parameters lr : float, default 0.001 Learning rate. penalty : str, {\'l1\', \'l2\', None}, default None Regularization function name. C : float, default 0.01 The regularization coefficient. tolerance : float, default 0.0001 If the gradient descent updates are smaller than `tolerance`, then stop the optimization process. max_iters : int, default 10000 The maximum number of iterations.'
def __init__(self, lr=0.001, penalty='None', C=0.01, tolerance=0.0001, max_iters=1000):
    self.C = C
    self.penalty = penalty
    self.tolerance = tolerance
    self.lr = lr
    self.max_iters = max_iters
    self.errors = []
    self.theta = []
    self.n_samples, self.n_features = None, None
    self.cost_func = None
'Apply regularization to the loss.'
def _add_penalty(self, loss, w):
    if self.penalty == 'l1':
        loss += self.C * np.abs(w[:-1]).sum()
    elif self.penalty == 'l2':
        loss += 0.5 * self.C * (w[:-1] ** 2).mean()
    return loss
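A small numerical comparison of the two penalties (the coefficient and weights are hypothetical; the last entry of w is treated as the bias and excluded, as above):

import numpy as np

C = 0.01
w = np.array([0.5, -2.0, 0.1])  # last entry treated as the bias, so it is skipped

l1 = C * np.abs(w[:-1]).sum()          # 0.01 * 2.5 = 0.025
l2 = 0.5 * C * (w[:-1] ** 2).mean()    # 0.005 * ((0.25 + 4.0) / 2) = 0.010625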
'Simplified factorization machines implementation using SGD optimizer.'
def __init__(self, n_components=10, max_iter=100, init_stdev=0.1, learning_rate=0.01, reg_v=0.1, reg_w=0.5, reg_w0=0.0):
    self.reg_w0 = reg_w0
    self.reg_w = reg_w
    self.reg_v = reg_v
    self.n_components = n_components
    self.lr = learning_rate
    self.init_stdev = init_stdev
    self.max_iter = max_iter
    self.loss = None
    self.loss_grad = None
'Base class for Nearest neighbors classifier and regressor. Parameters k : int, default 5 The number of neighbors to take into account. If 0, all the training examples are used. distance_func : function, default euclidean distance A distance function taking two arguments. Any function from scipy.spatial.distance will do.'
def __init__(self, k=5, distance_func=euclidean):
    self.k = None if k == 0 else k
    self.distance_func = distance_func
'Predict the label of a single instance x.'
def _predict_x(self, x):
    distances = (self.distance_func(x, example) for example in self.X)
    neighbors = sorted(((dist, target) for dist, target in zip(distances, self.y)),
                       key=lambda x: x[0])
    neighbors_targets = [target for _, target in neighbors[:self.k]]
    return self.aggregate(neighbors_targets)
'Return the most common target label.'
def aggregate(self, neighbors_targets):
    most_common_label = Counter(neighbors_targets).most_common(1)[0][0]
    return most_common_label
'Return the mean of all targets.'
def aggregate(self, neighbors_targets):
return np.mean(neighbors_targets)
'Perform Expectation–Maximization (EM) until converged.'
def fit(self, X, y=None):
    self._setup_input(X, y)
    self._initialize()
    for _ in range(self.max_iters):
        self._E_step()
        self._M_step()
        if self._is_converged():
            break
'Set the initial weights, means and covs (with full covariance matrix). weights: the prior of the clusters (what percentage of data does a cluster have) means: the mean points of the clusters covs: the covariance matrix of the clusters'
def _initialize(self):
    self.weights = np.ones(self.K)
    if self.init == 'random':
        self.means = [self.X[x] for x in random.sample(range(self.n_samples), self.K)]
        self.covs = [np.cov(self.X.T) for _ in range(self.K)]
    elif self.init == 'kmeans':
        kmeans = KMeans(K=self.K, max_iters=self.max_iters // 3, init='++')
        kmeans.fit(self.X)
        self.assignments = kmeans.predict()
        self.means = kmeans.centroids
        self.covs = []
        for i in np.unique(self.assignments):
            self.weights[int(i)] = (self.assignments == i).sum()
            self.covs.append(np.cov(self.X[self.assignments == i].T))
    else:
        raise ValueError('Unknown type of init parameter')
    self.weights /= self.weights.sum()
'Expectation(E-step) for Gaussian Mixture.'
def _E_step(self):
    likelihoods = self._get_likelihood(self.X)
    self.likelihood.append(likelihoods.sum())
    weighted_likelihoods = self._get_weighted_likelihood(likelihoods)
    self.assignments = weighted_likelihoods.argmax(axis=1)
    weighted_likelihoods /= weighted_likelihoods.sum(axis=1)[:, np.newaxis]
    self.responsibilities = weighted_likelihoods
'Maximization (M-step) for Gaussian Mixture.'
def _M_step(self):
    weights = self.responsibilities.sum(axis=0)
    for assignment in range(self.K):
        resp = self.responsibilities[:, assignment][:, np.newaxis]
        self.means[assignment] = (resp * self.X).sum(axis=0) / resp.sum()
        self.covs[assignment] = (self.X - self.means[assignment]).T.dot(
            (self.X - self.means[assignment]) * resp) / weights[assignment]
    self.weights = weights / weights.sum()
'Check if the difference between the latest two likelihoods is less than the tolerance.'
def _is_converged(self):
    if len(self.likelihood) > 1 and (self.likelihood[-1] - self.likelihood[-2]) <= self.tolerance:
        return True
    return False
'Get the assignments for X with GMM clusters.'
def _predict(self, X):
    if not X.shape:
        return self.assignments
    likelihoods = self._get_likelihood(X)
    weighted_likelihoods = self._get_weighted_likelihood(likelihoods)
    assignments = weighted_likelihoods.argmax(axis=1)
    return assignments
'Plot contour for 2D data.'
def plot(self, data=None, ax=None, holdon=False):
    if not (len(self.X.shape) == 2 and self.X.shape[1] == 2):
        raise AttributeError('Only support for visualizing 2D data.')

    if ax is None:
        _, ax = plt.subplots()

    if data is None:
        data = self.X
        assignments = self.assignments
    else:
        assignments = self.predict(data)

    COLOR = 'bgrcmyk'
    cmap = lambda assignment: COLOR[int(assignment) % len(COLOR)]

    delta = 0.025
    margin = 0.2
    xmax, ymax = self.X.max(axis=0) + margin
    xmin, ymin = self.X.min(axis=0) - margin
    axis_X, axis_Y = np.meshgrid(np.arange(xmin, xmax, delta), np.arange(ymin, ymax, delta))

    def grid_gaussian_pdf(mean, cov):
        grid_array = np.array(list(zip(axis_X.flatten(), axis_Y.flatten())))
        return multivariate_normal.pdf(grid_array, mean, cov).reshape(axis_X.shape)

    if assignments is None:
        c = None
    else:
        c = [cmap(assignment) for assignment in assignments]
    ax.scatter(data[:, 0], data[:, 1], c=c)
    for assignment in range(self.K):
        ax.contour(axis_X, axis_Y,
                   grid_gaussian_pdf(self.means[assignment], self.covs[assignment]),
                   colors=cmap(assignment))

    if not holdon:
        plt.show()
'Principal component analysis (PCA) implementation. Transforms a dataset of possibly correlated values into n linearly uncorrelated components. The components are ordered such that the first has the largest possible variance and each following component has the largest possible variance given the previous components. This causes the early components to contain most of the variability in the dataset. Parameters n_components : int solver : str, default \'svd\' {\'svd\', \'eigen\'}'
def __init__(self, n_components, solver='svd'):
    self.solver = solver
    self.n_components = n_components
    self.components = None
    self.mean = None
'Performs an update of parameters.'
def update(self, network):
raise NotImplementedError
'Creates additional variables. Note: Must be called before optimization process.'
def setup(self, network):
raise NotImplementedError