Columns: language (string, 6 classes); original_string (string, 25 to 887k characters); text (string, 25 to 887k characters).
Python
def _recv(channel: socket.socket) -> Optional[str]:
    """Wait for a response on a channel."""
    data = []
    while True:
        try:
            data.append(channel.recv(4096).decode("utf-8"))
            # NOTE: Some older Vims can't convert expressions with None to
            # Vim values so just return a string
            res = "".join(data)
            _ = json.loads(res)
            return res
        except json.JSONDecodeError:
            pass
        except (KeyError, ConnectionError):
            return None
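The loop above works because a partial JSON message fails to parse until the final chunk arrives; a small self-contained illustration of that accumulate-then-parse pattern (with made-up chunks, no socket involved):

import json

chunks = ['{"res', 'ult": 4', '2}']   # one message split across three hypothetical recv() calls
buf = ""
for chunk in chunks:
    buf += chunk
    try:
        json.loads(buf)               # raises until the message is complete
        print("complete:", buf)       # complete: {"result": 42}
        break
    except json.JSONDecodeError:
        continue                      # keep reading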
Python
def _pos_from_offset(col: int, msg: bytes, offset: int) -> Tuple[int, int]:
    """Calculate the line and column of a given offset."""
    msg = msg[:offset]
    lines = msg.split(b"\n")
    line = len(lines) - 1
    col = len(lines[-1]) + (col if line == 0 else 0)
    return (line, col)
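A quick worked example, assuming the function above is in scope: offsets index into the raw byte string, and the starting column is only added when the offset stays on the first line.

msg = b"Lemma foo :\n  True."
print(_pos_from_offset(0, msg, 14))  # (1, 2): byte 14 is the 'T' on the second line
print(_pos_from_offset(4, msg, 7))   # (0, 11): still on line 0, so the start column 4 is added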
Python
def _between(
    buf: Sequence[bytes],
    start: Tuple[int, int],
    end: Tuple[int, int],
) -> bytes:
    """Return the text between a given start and end point."""
    sline, scol = start
    eline, ecol = end

    lines: List[bytes] = []
    for idx, line in enumerate(buf[sline : eline + 1]):
        lcol = scol if idx == 0 else 0
        rcol = ecol + 1 if idx == eline - sline else len(line)
        lines.append(line[lcol:rcol])

    return b"\n".join(lines)
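For instance, with a small buffer (again assuming the function above), note that the end column is inclusive:

buf = [b"Lemma foo :", b"  True.", b"Proof."]
print(_between(buf, (0, 6), (1, 6)))  # b'foo :\n  True.'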
Python
def _get_message_range(
    lines: Sequence[bytes],
    after: Tuple[int, int],
) -> Mapping[str, Tuple[int, int]]:
    """Return the next sentence to send after a given point."""
    end_pos = _find_next_sentence(lines, *after)
    return {"start": after, "stop": end_pos}
Python
def _find_next_sentence(
    lines: Sequence[bytes],
    sline: int,
    scol: int,
) -> Tuple[int, int]:
    """Find the next sentence to send to Coq."""
    braces = {ord(c) for c in "{}"}
    bullets = {ord(c) for c in "-+*"}

    line, col = (sline, scol)
    while True:
        # Skip leading whitespace
        for line in range(sline, len(lines)):
            first_line = lines[line][col:].lstrip()
            if first_line.rstrip() != b"":
                col += len(lines[line][col:]) - len(first_line)
                break
            col = 0
        else:
            # break not reached, nothing left in the buffer but whitespace
            raise NoDotError()

        # Skip leading comments
        if first_line.startswith(b"(*"):
            com_end = _skip_comment(lines, line, col)
            if com_end is None:
                raise UnmatchedError("(*", (line, col))
            sline, col = com_end
        # Skip leading attributes
        elif first_line.startswith(b"#["):
            attr_end = _skip_attribute(lines, line, col)
            if attr_end is None:
                raise UnmatchedError("#[", (line, col))
            sline, col = attr_end
        else:
            break

    # Check if the first character of the sentence is a bullet
    if first_line[0] in braces | bullets:
        # '-', '+', '*' can be repeated
        for c in first_line[1:]:
            if c in bullets and c == first_line[0]:
                col += 1
            else:
                break
        return (line, col)

    # Check if this is a bracketed goal selector
    if _char_isdigit(first_line[0]):
        state = "digit"
        selcol = col
        for c in first_line[1:]:
            if state == "digit" and _char_isdigit(c):
                selcol += 1
            elif state == "digit" and _char_isspace(c):
                state = "beforecolon"
                selcol += 1
            elif state == "digit" and c == ord(":"):
                state = "aftercolon"
                selcol += 1
            elif state == "beforecolon" and _char_isspace(c):
                selcol += 1
            elif state == "beforecolon" and c == ord(":"):
                state = "aftercolon"
                selcol += 1
            elif state == "aftercolon" and _char_isspace(c):
                selcol += 1
            elif state == "aftercolon" and c == ord("{"):
                selcol += 1
                return (line, selcol)
            else:
                break

    # Otherwise, find an ending '.'
    return _find_dot_after(lines, line, col)
Python
def _skip_block(
    lines: Sequence[bytes],
    sline: int,
    scol: int,
    sstr: bytes,
    estr: bytes,
    skips: Optional[Mapping[bytes, SkipFun]] = None,
) -> Optional[Tuple[int, int]]:
    """A generic function to skip the next block contained in sstr estr."""
    assert lines[sline].startswith(sstr, scol)
    nesting = 1
    max_line = len(lines)
    scol += len(sstr)
    if skips is None:
        skips = {}

    while nesting > 0:
        if sline >= max_line:
            return None

        line = lines[sline]
        blk_end = line.find(estr, scol)
        blk_end = blk_end if blk_end != -1 else None
        if sstr != estr:
            blk_start = line.find(sstr, scol, blk_end)
            blk_start = blk_start if blk_start != -1 else None
        else:
            blk_start = None
        assert blk_start is None or blk_end is None or blk_start < blk_end

        # Look for contained blocks to skip
        skip_stop = blk_start if blk_start is not None else blk_end
        skip_starts = [(line.find(skip, scol, skip_stop), skip) for skip in skips]
        skip_starts = [(start, skip) for start, skip in skip_starts if start != -1]
        skip_start, skip = min(skip_starts, default=(None, None))
        if skip is not None and skip_start is not None:
            skip_end = skips[skip](lines, sline, skip_start)
            if skip_end is None:
                return None
            sline, scol = skip_end
            continue

        if blk_end is not None and blk_start is None:
            # Found an end and no new start
            scol = blk_end + len(estr)
            nesting -= 1
        elif blk_start is not None:
            # Found a new start
            scol = blk_start + len(sstr)
            nesting += 1
        else:
            # Nothing on this line
            sline += 1
            scol = 0

    return (sline, scol)
Python
def shift_slice(s: slice) -> slice:
    """Shift a 0-indexed to 1-indexed slice."""
    return slice(
        s.start + 1 if s.start is not None else 1,
        s.stop + 1 if s.stop is not None else None,
    )
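Usage is straightforward; open start/stop values map to 1-indexed defaults:

print(shift_slice(slice(2, 5)))        # slice(3, 6, None)
print(shift_slice(slice(None, None)))  # slice(1, None, None)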
Python
def discover_codebooks(location):
    """
    Return list of codebooks given the directory location
    """
    codebooks = []
    for codebook_file in os.listdir(location):
        if codebook_file.endswith('.npy'):
            with open(os.path.join(location, codebook_file), 'rb+') as f:
                codebook = np.load(f, allow_pickle=True)
                codebooks.append((codebook_file, codebook.item()))  # .item() to extract dictionary from 0d array
    return codebooks
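The .item() call is needed because np.save wraps a plain dict in a 0-d object array; a minimal round trip (with a made-up codebook and file name) shows why:

import numpy as np

cb = {"00": 7, "012": 3, "trajectories": ["0 1 2 0 0"]}
np.save("code_book1.npy", cb)                        # dict gets wrapped in a 0-d object array
loaded = np.load("code_book1.npy", allow_pickle=True)
print(loaded.shape, loaded.item() == cb)             # () True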
Python
def preprocess_codebook(codebook):
    """
    Removes trajectories from keys and puts the trajectories into single symbol list format.
    Also removes skills with 0 frequency.
    """
    trajectories = codebook.pop('trajectories')
    codebook_with_spaces = {}
    for key, value in codebook.items():
        if key == 'probabilities' or key == 'length_range':
            continue  # skip these
        if value > 0:
            codebook_with_spaces[key] = value
    single_symbol_traj_split = []
    for trajectory in trajectories:
        for symbol in trajectory.split(" "):
            if symbol != "":
                single_symbol_traj_split.append(symbol)
    # trajectories = "".join(trajectories)
    return single_symbol_traj_split, codebook_with_spaces
Python
def calculate_codebook_dl(codebook):
    """
    Given a codebook, calculate its description length:
        Length(encoding) + Size(Huffman Tree)
    The bit length of the encoding is easily calculated, but the size of the Huffman Tree
    can be represented simply as the number of bits required to recover the tree.
    In canonical form, it's just the number of bits needed to encode the bit LENGTHS of each symbol.
    """
    trajectories, codebook = preprocess_codebook(codebook)
    codec = HuffmanCodec.from_frequencies(codebook)
    codec.encode(codebook)
    # codec.print_code_table()
    encoded = codec.encode(trajectories)
    trajectory_symbol_set = set(trajectories)
    tree_bits = 0
    # Calculate the number of bits to send the tree
    for symbol, (bits, val) in codec._table.items():
        if symbol in trajectory_symbol_set:
            tree_bits += len(symbol.encode('utf-8')) * 8
            tree_bits += bits
    dl = len(encoded) * 8 + tree_bits  # * 8 for byte to bit conversion
    uncompressed_len = len("".join(trajectories).encode('utf-8')) * 8
    return dl, tree_bits, codec, uncompressed_len
    # return len(encoded) * 8, tree_bits, codec, uncompressed_len
    # return tree_bits, tree_bits, codec, uncompressed_len
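A toy description-length calculation in the same spirit, assuming HuffmanCodec comes from the dahuffman package (the public get_code_table() is used here instead of the private _table, and the codec's internal non-string end-of-stream symbol is skipped):

from dahuffman import HuffmanCodec

freqs = {"00": 5, "012": 2, "1": 1}               # made-up codebook: skill -> frequency
codec = HuffmanCodec.from_frequencies(freqs)
encoded = codec.encode(["00", "012", "00", "1"])  # a trajectory as a list of skills
tree_bits = sum(len(sym.encode("utf-8")) * 8 + bits
                for sym, (bits, _) in codec.get_code_table().items()
                if isinstance(sym, str))          # skip the end-of-stream symbol
print(len(encoded) * 8 + tree_bits)               # description length in bits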
Python
def evaluate_codebook_parallel(env, codebooks, num_test=500, num_train=500, print_every=50):
    """
    input:
        env: env variable
        codebooks: pre-processed codebook to evaluate
        num_test: number of test start/end pairs to evaluate,
        num_train: number of train start/end pairs to evaluate
    output:
        solutions: dict storing num_test trajectories for test set, num_train trajectories
        for train set for each codebook in codebooks
        entry form: (str form of trajectory, a_star search cost, start_pos, goal_pos)
    """
    start_time = dt.datetime.now()
    count_train, count_test, count = 0, 0, 0
    solutions = {}
    skills = {}
    for file, codebook, _ in codebooks:
        solutions[file] = {'test': [], 'train': []}
        skills[file] = [list(map(int, skill)) for skill in codebook.keys()]
    while 1:
        if count_train >= num_train and count_test >= num_test:
            break
        env.reset()
        if not training_valid(env) and count_test < num_test:  # in test set
            results = []
            no_solution = False
            out_q = mp.Queue()
            procs = []
            result_dict = {}
            for i, (file, codebook, length_range) in enumerate(codebooks):
                if i % NUM_PARALLEL_THREADS == 0:
                    for _ in procs:
                        result_dict.update(out_q.get())
                    for proc in procs:
                        proc.join()
                    procs = []
                p = mp.Process(
                    target=a_star_parallel,
                    kwargs={'env': env, 'out_q': out_q, 'skills': skills[file],
                            'name': file, 'length_range': length_range},
                )
                procs.append(p)
                p.start()
            for _ in procs:
                result_dict.update(out_q.get())
            for proc in procs:
                proc.join()
            for file, (solution, cost) in result_dict.items():
                if solution is None and cost is None:
                    no_solution = True
                    break
                traj = Trajectory(solution, env, simulate=True)  # simulate without interacting with env
                results.append((file, traj, cost))
            if not no_solution:
                for file, traj, cost in results:
                    solutions[file]['test'].append((str(traj), cost, traj.start_pos, traj.goal_pos))
                count_test += 1
        elif training_valid(env) and count_train < num_train:  # in train set
            results = []
            no_solution = False
            out_q = mp.Queue()
            procs = []
            result_dict = {}
            for i, (file, codebook, length_range) in enumerate(codebooks):
                if i % NUM_PARALLEL_THREADS == 0:
                    for _ in procs:
                        result_dict.update(out_q.get())
                    for proc in procs:
                        proc.join()
                    procs = []
                p = mp.Process(
                    target=a_star_parallel,
                    kwargs={'env': env, 'out_q': out_q, 'skills': skills[file],
                            'name': file, 'length_range': length_range},
                )
                procs.append(p)
                p.start()
            for _ in procs:
                result_dict.update(out_q.get())
            for proc in procs:
                proc.join()
            for file, (solution, cost) in result_dict.items():
                if solution is None and cost is None:
                    no_solution = True
                    break
                traj = Trajectory(solution, env, simulate=True)
                results.append((file, traj, cost))
            if not no_solution:
                for file, traj, cost in results:
                    solutions[file]['train'].append((str(traj), cost, traj.start_pos, traj.goal_pos))
                count_train += 1
        count += 1
        if count % print_every == 0 and count != 0:
            curr_time = dt.datetime.now()
            time = (curr_time - start_time).total_seconds()
            print('Total env tried: %d, test trajectories collected = %d, train trajectories collected = %d, time elapsed = %f sec'
                  % (count, count_test, count_train, time))
    return solutions
Python
def evaluate_codebook(env, codebooks, num_test=500, num_train=500, print_every=50):
    """
    input:
        env: env variable
        codebooks: pre-processed codebook to evaluate
        num_test: number of test start/end pairs to evaluate,
        num_train: number of train start/end pairs to evaluate
    output:
        solutions: dict storing num_test trajectories for test set, num_train trajectories
        for train set for each codebook in codebooks
        entry form: (str form of trajectory, a_star search cost, start_pos, goal_pos)
    """
    start_time = dt.datetime.now()
    count_train, count_test, count = 0, 0, 0
    solutions = {}
    skills = {}
    for file, codebook, _ in codebooks:
        solutions[file] = {'test': [], 'train': []}
        skills[file] = [list(map(int, skill)) for skill in codebook.keys()]
    while 1:
        if count_train >= num_train and count_test >= num_test:
            break
        env.reset()
        if not training_valid(env) and count_test < num_test:  # in test set
            results = []
            no_solution = False
            for file, codebook, length_range in codebooks:
                solution, cost = a_star(env, skills=skills[file], length_range=length_range)
                if solution is None and cost is None:
                    no_solution = True
                    break
                traj = Trajectory(solution, env, simulate=True)  # simulate without interacting with env
                results.append((file, traj, cost))
            if not no_solution:
                for file, traj, cost in results:
                    solutions[file]['test'].append((str(traj), cost, traj.start_pos, traj.goal_pos))
                count_test += 1
        elif training_valid(env) and count_train < num_train:  # in train set
            results = []
            no_solution = False
            for file, codebook, length_range in codebooks:
                solution, cost = a_star(env, skills=skills[file], length_range=length_range)
                if solution is None and cost is None:
                    no_solution = True
                    break
                traj = Trajectory(solution, env, simulate=True)
                results.append((file, traj, cost))
            if not no_solution:
                for file, traj, cost in results:
                    solutions[file]['train'].append((str(traj), cost, traj.start_pos, traj.goal_pos))
                count_train += 1
        count += 1
        if count % print_every == 0 and count != 0:
            curr_time = dt.datetime.now()
            time = (curr_time - start_time).total_seconds()
            print('Total env tried: %d, test trajectories collected = %d, train trajectories collected = %d, time elapsed = %f sec'
                  % (count, count_test, count_train, time))
    return solutions
Python
def collect_data_method1(env, data_folder, seed=None, num_code_books=10, num_trajectories=1000):
    """
    Method 1: randomly define skills, collect A* trajectories with these skills,
    save the trajectories and skill frequencies as codebook
    """
    if seed is not None:
        env.seed(seed)
    for i in range(num_code_books):
        skills = generate_skills()
        print('Generated skills (len=%d):' % len(skills), skills)
        trajectories = collect_trajectories(env, skills, num=num_trajectories, show=False)
        code_book = build_codebook_method_1(trajectories, skills)
        # print(code_book)
        path = os.path.join(data_folder, 'code_book' + str(i + 1) + '.npy')
        np.save(path, code_book)
        print('Codebook saved to %s' % path)
        # cb = np.load(path, allow_pickle=True).item()
        # print(cb)
Python
def collect_data_method2(env, data_folder, skill_length_range, num_skills=None, uniform=False,
                         seed=None, num_code_books=20, num_trajectories=100):
    """
    Method 2: collect A* trajectories with only primitive actions, randomly dissect
    trajectories to form skills, save the trajectories and skill frequencies as codebook
    """
    if seed is not None:
        env.seed(seed)
    trajectories = collect_trajectories(env, num=num_trajectories, show=False)
    count = 0
    while count < num_code_books:
        code_book = build_codebook_method_2(trajectories, skill_length_range, True)
        # print(code_book)
        # minus 2: 'length_range' & 'probabilities'
        if len(code_book) - 2 == num_skills and num_skills is not None or num_skills is None:
            path = os.path.join(data_folder, 'code_book' + str(count + 1) + '.npy')
            np.save(path, code_book)
            print('Codebook saved to %s' % path)
            count += 1
Python
def calculate_codebook_metrics(location):
    """
    Return list of smoothed, averaged codebook returns given the codebook location
    """
    dirs_to_search = ['rl_logs_train', 'rl_logs_test']
    metrics = {}
    for log_dir in dirs_to_search:
        for codebook_file in os.listdir(os.path.join(location, log_dir)):
            for seed_dir in os.listdir(os.path.join(location, log_dir, codebook_file)):
                for inner_file in os.listdir(os.path.join(location, log_dir, codebook_file, seed_dir)):
                    if inner_file.endswith('progress.csv'):
                        progress_csv = os.path.join(location, log_dir, codebook_file, seed_dir, inner_file)
                        df = pd.read_csv(progress_csv)
                        rewards = df['evaluation/Average Returns'].to_numpy()
                        # path_length = df['evaluation/path length Mean'].to_numpy()
                        stripped_codebook_file = codebook_file.replace('rl_', '')
                        stripped_codebook_file += '.npy'
                        if stripped_codebook_file not in metrics:
                            metrics[stripped_codebook_file] = dict(train=[], test=[])
                        if 'train' in log_dir:
                            metrics[stripped_codebook_file]['train'].append(rewards)
                        else:
                            metrics[stripped_codebook_file]['test'].append(rewards)
    for codebook_file in metrics.keys():
        smoothed_train = smooth(metrics[codebook_file]['train'], 0.5)
        smoothed_test = smooth(metrics[codebook_file]['test'], 0.5)
        metrics[codebook_file]['train'] = smoothed_train  # (np.mean(smoothed_train, axis=0), np.var(smoothed_train))
        metrics[codebook_file]['test'] = smoothed_test  # (np.mean(smoothed_test, axis=0), np.var(smoothed_test))
    return metrics
Python
def discover_evaluations(location):
    """
    Return list of evaluations given the codebook location
    """
    codebooks = []
    for codebook_file in os.listdir(location):
        if codebook_file.endswith('.npy'):
            with open(os.path.join(location, codebook_file), 'rb+') as f:
                codebook = np.load(f, allow_pickle=True)
                codebooks.append((codebook_file, codebook.item()))  # .item() to extract dictionary from 0d array
    return codebooks
Python
def process_evaluation(evaluation, codec, tree_bits, name, trajectory_dict):
    """
    Adds to trajectory_dict the mappings from start/end positions to the various metrics
    stored for the associated codebook
    """
    test_trajectories = evaluation.pop('test')
    train_trajectories = evaluation.pop('train')

    def process_trajectories(trajectories, traj_type):
        for trajectory, node_cost, start, end in trajectories:
            trajectory_id = (start, end)
            cleaned_trajectory = list(filter(lambda a: a != "", trajectory.split(" ")))
            code_length = len(codec.encode(cleaned_trajectory)) * 8
            num_primitive_actions = len(trajectory.replace(" ", ""))
            num_abstract_actions = len(cleaned_trajectory)
            metrics = dict(
                num_primitive_actions=num_primitive_actions,
                num_abstract_actions=num_abstract_actions,
                code_length=code_length,
                description_length=code_length + tree_bits,
                node_cost=node_cost)
            if trajectory_id not in trajectory_dict[traj_type]:
                trajectory_dict[traj_type][trajectory_id] = {name: metrics}
            else:
                trajectory_dict[traj_type][trajectory_id][name] = metrics

    process_trajectories(train_trajectories, 'train')
    process_trajectories(test_trajectories, 'test')
Python
def shrink_features(cls, threshold=5):
    '''
    Shrink the features whose appearence is less than the setting threshold.
    '''
    shrinked_index = 0
    shrinked_feature = {}
    cls.FEATURE_INDEX = {}
    # Regenerate index for shrinked feature space
    for fea, index in cls.FEATURE.iteritems():
        if cls.FEATURE_APPEARENCE[index] >= threshold:
            shrinked_feature[fea] = index
            cls.FEATURE_INDEX[index] = shrinked_index
            shrinked_index += 1
    shrinked_feature_number = cls.feature_number - shrinked_index
    cls.feature_number = shrinked_index
    cls.FEATURE_APPEARENCE = None
    logging.info('[OK]...Feature Shrinking')
    logging.info('---# of shrinked Features: {0}'.format(shrinked_feature_number))
Python
def _feature_vector_generation(self):
    '''
    Generate the feature vector in the shrinked feature space.
    '''
    return dict(
        [
            (str(MentionDatum.FEATURE_INDEX[index]), 1)
            for index in self.features
            if index in MentionDatum.FEATURE_INDEX
        ]
    )
Python
def regenerate_feature(cls, mentions):
    '''
    Generate feature vectors for all relation mentions
    '''
    return [mention._feature_vector_generation() for mention in mentions]
Python
def transpose_values(cls):
    '''
    Transpose all value dicts for the generation of datum files.
    '''
    cls.ENTITY = dict(
        zip(cls.ENTITY.values(), cls.ENTITY.keys())
    )
    cls.TYPE = dict(zip(cls.TYPE.values(), cls.TYPE.keys()))
    cls.NE = dict(zip(cls.NE.values(), cls.NE.keys()))
    cls.SLOT = dict(zip(cls.SLOT.values(), cls.SLOT.keys()))
    cls.RELATION = dict(
        zip(cls.RELATION.values(), cls.RELATION.keys())
    )
    cls.FEATURE = dict(
        zip(cls.FEATURE.values(), cls.FEATURE.keys())
    )
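Each of those assignments is the same one-line dict inversion; for example (with made-up values):

relation = {'per:spouse': 0, '_NR': 1}
print(dict(zip(relation.values(), relation.keys())))  # {0: 'per:spouse', 1: '_NR'}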
Python
def _subsample_negatives(mention):
    '''
    Subsample negatives from mention.
    :type mention: MentionDatum
    :rtype boolean
    '''
    nr = MentionDatum.RELATION.get('_NR', None)
    if nr is not None\
            and [nr] == mention.relation\
            and random.uniform(0, 1) > NEG_RATIO:
        return False
    return True
Python
def _generate_file_path(index, generate_mode='{0}/kb_part-00{1:0>2d}.datums'):
    '''
    Generate the file path in the directory
    '''
    return generate_mode.format(directory, index)
Python
def _assign_cluster_relation(predicts, mentions):
    '''
    Assign each cluster the most similar relation according to the assumption.
    --------------------------------------------------------------------------
    :type predicts: numpy.ndarray[n_samples,]
    :type mentions: List[MentionDatum]
    :rtype: List[(int, double)]
    '''
    start = time.clock()
    relation_for_clusters = []
    # Predicts -> clusters
    clusters = _predict_to_cluster(predicts, mentions)
    for cluster in clusters:
        relation_counter = collections.Counter(cluster)
        logging.info('---Cluster assign: {0}'.format(relation_counter))
        assign_relation = relation_counter.most_common(1)[0]
        relation_for_clusters.append(
            (
                assign_relation[0],
                (assign_relation[1] + 0.0) / len(cluster),
            )
        )
    time_cost = time.clock() - start
    logging.info('---[OK]...Assign cluster relations cost of {0}'.format(time_cost))
    return relation_for_clusters
Python
def _subsample_mention(predicts, clusters, mentions):
    '''
    Subsample mentions in a cluster based on the probability of the relation.
    -------------------------------------------------------------------------
    :type predicts: numpy.ndarray[n_samples,]
    :type clusters: List[(int, double)]
    :type mentions: List[MentionDatum]
    :rtype: None
    '''
    start = time.clock()
    subsample_number = 0
    for index, predict in enumerate(predicts):
        relation, probability = clusters[predict]
        if not SUBSAMPLE or random.random() < probability:
            mentions[index].relabel_relation.append(relation)
            subsample_number += 1
    time_cost = time.clock() - start
    logging.info('---[OK]...Subsample mentions cost of {0}'.format(time_cost))
    logging.info('------# of subsamples: {0}'.format(subsample_number))
Python
def kmeans_predict(mentions, cluster_number=100):
    '''
    The framework predicts labels of mentions as following:
        1. Generate the feature space
        2. Kmeans divides the feature space into k clusters
        3. Reassign each cluster a relation based on DS
        4. Subsample mentions in the cluster to be labeled with corresponding relation
    NOTE: Usually k is much higher than the # of known relations.
    ---------------------------------------------------
    :type mentions: List[DatumMention]
    :type cluster_number: int
    :rtype None
    '''
    start = time.clock()
    feature_space = _generate_feature_space(mentions)
    predicts = _minibatchkmeans(feature_space, cluster_number)
    relation_for_clusters = _assign_cluster_relation(predicts, mentions)
    _generate_cluster(predicts, relation_for_clusters, mentions)
    _subsample_mention(predicts, relation_for_clusters, mentions)
    logging.info('[OK]...Framework | Cost {0}s'.format(time.clock() - start))
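The helpers called here are not shown in this snippet; a minimal sketch of steps 1-2 with scikit-learn, assuming feature dicts shaped like the ones _feature_vector_generation returns, might look like:

from sklearn.feature_extraction import DictVectorizer
from sklearn.cluster import MiniBatchKMeans

features = [{'3': 1, '17': 1}, {'3': 1, '42': 1}, {'8': 1}]   # toy sparse feature dicts
space = DictVectorizer().fit_transform(features)              # n_samples x n_features sparse matrix
predicts = MiniBatchKMeans(n_clusters=2, n_init=10).fit_predict(space)
print(predicts)                                               # a cluster id per mention, e.g. [0 0 1]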
Python
def regenerate_datums(mentions, filepath):
    '''
    Regenerate datums with the new relation
    -------------------------------------------------
    :type mentions: List[MentionDatum]
    :type filepath: basestring
    :rtype: None
    '''
    start = time.clock()
    file_number = len(mentions) / 90000 + 1
    negative_number = 0
    nr = MentionDatum.RELATION.get('_NR')
    # transpose values
    MentionDatum.transpose_values()
    for index in xrange(file_number):
        with open(filepath + '/{0:0>2d}.datums'.format(index), 'w') as f:
            for mention in mentions[index * 90000:(index + 1) * 90000]:
                if nr in mention.relabel_relation:
                    negative_number += 1
                f.write(str(mention))
                f.write('\n')
        logging.debug('---[OK]...Generate {0:0>2d}.datums'.format(index))
    spend = time.clock() - start
    logging.info('[OK]...Generate {0} Datums File'.format(file_number))
    logging.info('[OK]...Negative number: {0}'.format(negative_number))
    logging.info('---Cost time: {0} | Average per file: {1}'.format(spend, spend / file_number))
Python
def parse_fixed(cls, data):
    '''Parse and return the fixed-length part of a SOCKS request

    Returns a tuple containing (vn, cd, dst_port, dst_ip) given the raw socks request
    '''
    return struct.unpack('>BBHL', data[:8])
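A worked example of the fixed eight-byte SOCKS4 header, assuming the ClientRequest class these methods belong to: version 4, CONNECT (cd=1), port 80, IP 93.184.216.34.

import struct

data = struct.pack('>BBHL', 4, 1, 80, 0x5DB8D822) + b'user\x00'
print(ClientRequest.parse_fixed(data))  # (4, 1, 80, 1572395042)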
Python
def parse_vn(self, data):
    '''Parse and store the version number given the raw SOCKS request'''
    vn, _, _, _ = ClientRequest.parse_fixed(data)
    if (vn != CLIENT_VN):
        self.invalid = True
Python
def parse_cd(self, data):
    '''Parse and store the request code given the raw SOCKS request'''
    _, cd, _, _ = ClientRequest.parse_fixed(data)
    if (cd == REQUEST_CD_CONNECT or cd == REQUEST_CD_BIND):
        self.cd = cd
    else:
        self.invalid = True
Python
def parse_ip(self, data):
    '''Parse and store the destination ip given the raw SOCKS request

    If the IP is of the form 0.0.0.(1-255), attempt to resolve the domain
    name specified, then store the resolved ip as the destination ip.
    '''
    _, _, _, dst_ip = ClientRequest.parse_fixed(data)

    ip = ipaddr.IPv4Address(dst_ip)
    o1, o2, o3, o4 = ip.packed

    # Invalid ip address specifying that we must resolve the domain
    # specified in data (As specified in SOCKS4a)
    if (o1, o2, o3) == (0, 0, 0) and o4 != 0:
        try:
            # Variable length part of the request containing the userid
            # and domain (8th byte onwards)
            userid_and_domain = data[8:]
            # Extract the domain to resolve
            _, domain, _ = userid_and_domain.split(b'\x00')
        except ValueError:
            # Error parsing request
            self.invalid = True
            return

        try:
            resolved_ip = socket.gethostbyname(domain)
        except socket.gaierror:
            # Domain name not found
            self.invalid = True
            return

        self.dst_ip = resolved_ip
    else:
        self.dst_ip = ip.exploded
Python
def parse_userid(self, data):
    '''Parse and store the userid given the raw SOCKS request'''
    try:
        index = data.index(b'\x00')
        self.userid = data[8:index]
    except ValueError:
        self.invalid = True
    except IndexError:
        self.invalid = True
Python
def build_socks_reply(cd, dst_port=0x0000, dst_ip='0.0.0.0'):
    '''
    Build a SOCKS4 reply with the specified reply code, destination port and destination ip.
    '''
    # dst_ip_bytes = ipaddress.IPv4Address(dst_ip).packed
    dst_ip_bytes = ipaddr.IPv4Address(dst_ip).packed
    dst_ip_raw, = struct.unpack('>L', dst_ip_bytes)

    return struct.pack('>BBHL', SERVER_VN, cd, dst_port, dst_ip_raw)
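For example, a "request granted" reply (code 90) for 127.0.0.1:8080, assuming SERVER_VN is 0 as the SOCKS4 spec requires for replies:

reply = build_socks_reply(90, dst_port=8080, dst_ip='127.0.0.1')
# -> b'\x00Z\x1f\x90\x7f\x00\x00\x01' (version, reply code, port, IPv4, all big-endian)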
Python
def pid_exists(pid):
    """Check whether pid exists in the current process table."""
    if pid < 0:
        return False
    try:
        os.kill(pid, 0)
    except OSError, e:
        return e.errno == errno.EPERM
    else:
        return True
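Usage is simple; signal 0 performs the existence/permission check without actually delivering a signal:

import os

print(pid_exists(os.getpid()))  # True: the current process exists
print(pid_exists(-1))           # False: negative PIDs are rejected up front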
Python
def printListElement(list1, index):
    '''
    This function will print an element from the list as determined by the list index.
    If the list index is invalid, it will print an error message.
    '''
    try:
        print(list1[index])
    except:
        print("Error: bad index number.")
Python
def generate_strong_inp(length, reservoir_size):  # Random neurons in the reservoir acts as inputs
    """
    Args:
        length - Number of input neurons
    Returns:
        out - Input vector of length equals the number of neurons in the reservoir
              with randomly chosen neuron set active
        idx - List of chosen input neurons
    """
    inp = [0] * reservoir_size
    x = [0] * length
    idx = np.random.choice(length, np.random.randint(reservoir_size))

    for i in idx:
        x[i] = 1.0e4

    inp[:len(x)] = x

    return inp, idx
Python
def multi_one_hot_inp(ne, inputs, n_nodes_per_inp):
    """Args:
        ne - Number of excitatory units in sorn
        inputs - input labels
        n_nodes_per_inp - Number of target units in pool that receives single input
    Returns:
        one_hot_vector for each label with length equals ne"""
    one_hot = np.zeros((ne, len(inputs)))
    idxs = []
    for _ in range(n_nodes_per_inp):
        idxs.append(random.sample(range(0, ne), len(inputs)))
    idxs = list(zip(*idxs))

    j = 0  # Max(j) = len(inputs)
    for idx_list in idxs:
        for i in idx_list:
            one_hot[i][j] = 1
        j += 1

    return one_hot, idxs
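A small illustrative call (made-up sizes): each column is the one-hot pool for one input label, and because the per-round samples can overlap across rounds, a column holds at most n_nodes_per_inp ones.

one_hot, idxs = multi_one_hot_inp(ne=10, inputs=['A', 'B'], n_nodes_per_inp=3)
print(one_hot.shape)        # (10, 2)
print(one_hot.sum(axis=0))  # up to [3. 3.]; repeated draws for the same label may collide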
Python
def generate_gaussian_inputs(length, reservoir_size):  # Randomly chosen neurons in the reservoir act as inputs
    """
    Args:
        length - Number of input neurons
    Returns:
        out - Input vector of length equals the number of neurons in the reservoir
              with randomly chosen neuron set active
        idx - List of chosen input neurons
    """
    out = [0] * reservoir_size
    x = [0] * length
    idx = np.random.choice(length, np.random.randint(reservoir_size))
    inp = np.random.normal(size=length)  # one Gaussian sample per input neuron

    for i in idx:
        x[i] = inp[i]

    out[:len(x)] = x

    return out, idx
Python
def normalize_weight_matrix(weight_matrix):
    # Applied only while initializing the weight. During simulation, Synaptic scaling applied on weight matrices
    """
    Normalize the weights in the matrix such that incoming connections to a neuron sum up to 1

    Args:
        weight_matrix(array) -- Incoming Weights from W_ee or W_ei or W_ie

    Returns:
        weight_matrix(array) -- Normalized weight matrix
    """
    normalized_weight_matrix = weight_matrix / np.sum(weight_matrix, axis=0)
    return normalized_weight_matrix
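A quick sanity check with a random matrix shows every column (one neuron's incoming weights) summing to 1 afterwards:

import numpy as np

w = np.random.uniform(0.0, 0.1, (5, 3))            # 5 presynaptic x 3 postsynaptic weights
print(np.sum(normalize_weight_matrix(w), axis=0))  # [1. 1. 1.]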
Python
def generate_lambd_connections(synaptic_connection, ne, ni, lambd_w, lambd_std):
    """
    Args:
        synaptic_connection - Type of synaptic connection (EE, EI or IE)
        ne - Number of excitatory units
        ni - Number of inhibitory units
        lambd_w - Average number of incoming connections
        lambd_std - Standard deviation of average number of connections per neuron
    Returns:
        connection_weights - Weight matrix
    """
    if synaptic_connection == 'EE':
        """Choose random lamda connections per neuron"""

        # Draw normally distributed ne integers with mean lambd_w
        lambdas_incoming = norm.ppf(np.random.random(ne), loc=lambd_w, scale=lambd_std).astype(int)
        # lambdas_outgoing = norm.ppf(np.random.random(ne), loc=lambd_w, scale=lambd_std).astype(int)

        # List of neurons
        list_neurons = list(range(ne))

        # Connection weights
        connection_weights = np.zeros((ne, ne))

        # For each lambd value in the above list,
        # generate weights for incoming and outgoing connections

        # -------------Gaussian Distribution of weights --------------
        # weight_matrix = np.random.randn(Sorn.ne, Sorn.ni) + 2
        # Small random values from gaussian distribution
        # Centered around 2 to make all values positive

        # ------------Uniform Distribution --------------------------
        global_incoming_weights = np.random.uniform(0.0, 0.1, sum(lambdas_incoming))

        # Index Counter
        global_incoming_weights_idx = 0

        # Choose the neurons in order [0 to 199]
        for neuron in list_neurons:
            # Choose random unique (lambdas[neuron]) neurons from list_neurons
            possible_connections = list_neurons.copy()
            possible_connections.remove(neuron)  # Remove the selected neuron from possible connections i != j

            # Choose random presynaptic neurons
            possible_incoming_connections = random.sample(possible_connections, lambdas_incoming[neuron])

            incoming_weights_neuron = global_incoming_weights[
                global_incoming_weights_idx:global_incoming_weights_idx + lambdas_incoming[neuron]]

            # ---------- Update the connection weight matrix ------------
            # Update incoming connection weights for selected 'neuron'
            for incoming_idx, incoming_weight in enumerate(incoming_weights_neuron):
                connection_weights[possible_incoming_connections[incoming_idx]][neuron] = incoming_weight

            global_incoming_weights_idx += lambdas_incoming[neuron]

        return connection_weights

    if synaptic_connection == 'EI':
        """Choose random lamda connections per neuron"""

        # Draw normally distributed ni integers with mean lambd_w
        lambdas = norm.ppf(np.random.random(ni), loc=lambd_w, scale=lambd_std).astype(int)

        # List of neurons
        list_neurons = list(range(ni))  # Each i can connect with random ne neurons

        # Initializing connection weights variable
        connection_weights = np.zeros((ni, ne))

        # ------------Uniform Distribution -----------------------------
        global_outgoing_weights = np.random.uniform(0.0, 0.1, sum(lambdas))

        # Index Counter
        global_outgoing_weights_idx = 0

        # Choose the neurons in order [0 to 40]
        for neuron in list_neurons:
            # Choose random unique (lambdas[neuron]) neurons from list_neurons
            possible_connections = list(range(ne))
            possible_outgoing_connections = random.sample(
                possible_connections, lambdas[neuron])  # possible outgoing connections to the neuron

            # Update weights
            outgoing_weights = global_outgoing_weights[
                global_outgoing_weights_idx:global_outgoing_weights_idx + lambdas[neuron]]

            # ---------- Update the connection weight matrix ------------
            # Update outgoing connections for the neuron
            for outgoing_idx, outgoing_weight in enumerate(outgoing_weights):
                # Update the columns in the connection matrix
                connection_weights[neuron][possible_outgoing_connections[outgoing_idx]] = outgoing_weight

            # Update the global weight values index
            global_outgoing_weights_idx += lambdas[neuron]

        return connection_weights
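A minimal usage sketch (my addition, not part of the snippet above): it assumes numpy, scipy.stats.norm and random are available at module level, exactly as the function requires, and the unit counts and lambda parameters are purely illustrative.

import random
import numpy as np
from scipy.stats import norm  # the function above calls norm.ppf

# Hypothetical SORN-style sizes: 200 excitatory, 40 inhibitory units,
# roughly 20 incoming connections per excitatory neuron on average.
wee = generate_lambd_connections('EE', ne=200, ni=40, lambd_w=20, lambd_std=3)
print(wee.shape)                             # (200, 200)
print(np.count_nonzero(wee, axis=0).mean())  # close to lambd_w incoming connections per column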
Python
def prune_small_weights(weights, cutoff_weight):
    """Set all weights at or below cutoff_weight to cutoff_weight (prunes negative and near-zero connections)"""

    weights[weights <= cutoff_weight] = cutoff_weight

    return weights
Python
def white_gaussian_noise(mu, sigma, t):
    """Generates white gaussian noise with mean mu, standard deviation sigma and
    the noise length equals t"""

    noise = np.random.normal(mu, sigma, t)

    return np.expand_dims(noise, 1)
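For instance (my addition; the parameter values are assumptions, not from the source), the per-unit noise term consumed by the network-state methods further below can be drawn as:

noise_e = white_gaussian_noise(mu=0., sigma=0.04, t=200)  # 200 is a hypothetical number of units
print(noise_e.shape)  # (200, 1): one noise value per unit per call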
Python
def network_connection_dynamics(connection_counts, savefig):
    """Args:
    :param connection_counts(array) - 1D Array of number of connections in the network per time step
    :param savefig(bool) - If True plot will be saved as png file in the cwd

    Returns:
    plot object"""

    # Plot graph for entire simulation time period
    fig1, ax1 = plt.subplots(figsize=(12, 5))
    ax1.plot(connection_counts, label='Connection dynamics')
    plt.margins(x=0)
    ax1.set_xticks(ax1.get_xticks()[::2])

    ax1.set_title("Network connection dynamics")
    plt.ylabel('Number of active connections')
    plt.xlabel('Time step')
    plt.legend(loc='upper right')
    plt.tight_layout()

    # Inset plot for initial simulation steps
    ax2 = plt.axes([0, 0, 1, 1])

    # Set the position and relative size of the inset axes within ax1
    ip = InsetPosition(ax1, [0.25, 0.4, 0.3, 0.3])
    ax2.set_axes_locator(ip)
    ax2.plot(connection_counts[0:10000])
    plt.margins(x=0)
    ax2.set_title('Initial 10000 time steps of Decay Phase')
    ax2.set_xticks(ax2.get_xticks()[::2])

    # End Inset plot
    ax3 = plt.axes([0, 0, 0, 0])

    # Set the position and relative size of the inset axes within ax1
    ip1 = InsetPosition(ax1, [0.6, 0.4, 0.3, 0.3])
    ax3.set_axes_locator(ip1)

    # Plot the last 10000 time steps
    ax3.plot(connection_counts[-10000:])
    plt.margins(x=0)
    ax3.set_title('Final 10000 time steps of Stable Phase')
    ax3.set_xticks(ax3.get_xticks()[::1])

    # Uncomment to show decay and stable phase in colors
    # ax1.axvspan(0, 200000, alpha=0.1, color='red')
    # ax2.axvspan(0, 10000, alpha=0.1, color='red')
    # ax1.axvspan(200000, 1000000, alpha=0.1, color='green')

    if savefig:
        plt.savefig('connection_dynamics')

    return plt.show()
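A hedged usage sketch (mine): it fabricates a decaying-then-stable connection count just to exercise the plot, and assumes matplotlib.pyplot as plt and InsetPosition (from mpl_toolkits.axes_grid1.inset_locator) are imported, as the function above requires.

import numpy as np

steps = 1_000_000
connection_counts = (40000 * np.exp(-np.arange(steps) / 50000.) + 12000
                     + np.random.randint(-50, 50, steps))  # synthetic decay phase followed by a plateau
network_connection_dynamics(connection_counts, savefig=False)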
Python
def scale_dependent_smoothness_measure(firing_rates):  # Smaller values correspond to a smoother series
    """
    Args:
    firing_rates - List of number of active neurons per time step

    Returns:
    sd_diff - Float value that signifies the smoothness of the temporal changes in firing rates
    """

    diff = np.diff(firing_rates)
    sd_diff = np.std(diff)

    return sd_diff
Python
def scale_independent_smoothness_measure(firing_rates):  # Smaller values correspond to a smoother series
    """
    Args:
    firing_rates - List of number of active neurons per time step

    Returns:
    coeff_var - Float value that signifies the smoothness of the temporal changes in firing rates
    """

    diff = np.diff(firing_rates)
    mean_diff = np.mean(diff)
    sd_diff = np.std(diff)

    coeff_var = sd_diff / abs(mean_diff)

    return coeff_var
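A short comparison of the two measures above on fabricated series (my addition; numpy is assumed to be imported as np):

import numpy as np

smooth_rates = np.linspace(10, 60, 500)                   # constant step size
noisy_rates = smooth_rates + np.random.normal(0, 5, 500)  # same trend plus jitter

print(scale_dependent_smoothness_measure(smooth_rates))    # ~0: the step-to-step differences barely vary
print(scale_dependent_smoothness_measure(noisy_rates))     # much larger std of the differences
print(scale_independent_smoothness_measure(noisy_rates))   # std / |mean| of the differences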
Python
def spike_times(spike_train):
    """Get the time instants at which neuron spikes"""

    times = np.where(spike_train == 1.)
    return times
Python
def fanofactor(spike_train, neuron, window_size):
    """Investigate whether neuronal spike generation is a Poisson process"""

    # Choose activity of random neuron
    neuron_act = spike_train[:, neuron]

    # Divide the total observations of the chosen neuron into 'window_size' equal time windows
    tws = np.split(neuron_act, window_size)
    fr = []
    for i in range(len(tws)):
        fr.append(np.count_nonzero(tws[i]))

    # print('Firing rate of the neuron during each time window of size %s is %s' % (ws, fr))

    mean_firing_rate = np.mean(fr)
    variance_firing_rate = np.var(fr)

    fano_factor = variance_firing_rate / mean_firing_rate

    return mean_firing_rate, variance_firing_rate, fano_factor
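A usage sketch (my own; the spike-train shape is hypothetical). Note that np.split requires the number of time steps to be divisible by window_size, and that window_size here is effectively the number of windows rather than their width.

import numpy as np

spike_train = (np.random.rand(10000, 200) < 0.1).astype(float)  # 10000 steps x 200 neurons, independent spiking
mean_fr, var_fr, ff = fanofactor(spike_train, neuron=5, window_size=10)
print(ff)  # near 1 (slightly below) for independent Bernoulli spiking, as a Poisson process would give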
Python
def stdp(self, wee, x, cutoff_weights):
    """Apply STDP rule: Regulates synaptic strength between the pre(Xj) and post(Xi) synaptic neurons"""

    x = np.asarray(x)
    xt_1 = x[:, 0]
    xt = x[:, 1]
    wee_t = wee.copy()

    # STDP applies only on the neurons which are connected.
    for i in range(len(wee_t[0])):  # Each neuron i, post-synaptic neuron

        for j in range(len(wee_t[0:])):  # Incoming connection from jth pre-synaptic neuron to ith neuron

            if wee_t[j][i] != 0.:  # Check connectivity

                # Get the change in weight
                delta_wee_t = self.eta_stdp * (xt[i] * xt_1[j] - xt_1[i] * xt[j])

                # Update the weight from the jth neuron to the ith neuron (differs from the notation in the article)
                wee_t[j][i] = wee[j][i] + delta_wee_t

    """Prune the smallest weights induced by plasticity mechanisms; apply lower cutoff weight"""
    wee_t = initializer.prune_small_weights(wee_t, cutoff_weights[0])

    """Check and set all weights < upper cutoff weight"""
    wee_t = initializer.set_max_cutoff_weight(wee_t, cutoff_weights[1])

    return wee_t
Python
def ip(self, te, x):

    # IP rule: Active unit increases its threshold and inactive unit decreases its threshold.
    xt = x[:, 1]

    te_update = te + self.eta_ip * (xt.reshape(self.ne, 1) - self.h_ip)

    """Check whether all te are in range [0.0, 1.0] and update accordingly"""

    # Update te < 0.0 ---> 0.0
    # te_update = prune_small_weights(te_update, self.te_min)

    # Set all te > 1.0 --> 1.0
    # te_update = set_max_cutoff_weight(te_update, self.te_max)

    return te_update
Python
def istdp(self, wei, x, y, cutoff_weights):

    # Apply iSTDP rule: Regulates synaptic strength between the pre(Yj) and post(Xi) synaptic neurons

    # Excitatory network activity
    x = np.asarray(x)  # Array sanity check
    xt_1 = x[:, 0]
    xt = x[:, 1]

    # Inhibitory network activity
    y = np.asarray(y)
    yt_1 = y[:, 0]
    yt = y[:, 1]

    # iSTDP applies only on the neurons which are connected.
    wei_t = wei.copy()

    for i in range(len(wei_t[0])):  # Each neuron i, post-synaptic neuron: means for each column

        for j in range(len(wei_t[0:])):  # Incoming connection from j, pre-synaptic neuron to ith neuron

            if wei_t[j][i] != 0.:  # Check connectivity

                # Get the change in weight
                delta_wei_t = - self.eta_inhib * yt_1[j] * (1 - xt[i] * (1 + 1 / self.mu_ip))

                # Update the weight from the jth neuron to the ith neuron (differs from the notation in the article)
                wei_t[j][i] = wei[j][i] + delta_wei_t

    """Prune the smallest weights induced by plasticity mechanisms; apply lower cutoff weight"""
    wei_t = initializer.prune_small_weights(wei_t, cutoff_weights[0])

    """Check and set all weights < upper cutoff weight"""
    wei_t = initializer.set_max_cutoff_weight(wei_t, cutoff_weights[1])

    return wei_t
Python
def structural_plasticity(wee):
    """Add new connection value to the smallest weight between excitatory units randomly"""

    p_c = np.random.randint(0, 10, 1)

    if p_c == 0:  # p_c = 0.1

        """Do structural plasticity"""

        # Choose the smallest weights randomly from the weight matrix wee
        indexes = initializer.get_unconnected_indexes(wee)

        # Choose any idx randomly
        idx_rand = random.choice(indexes)

        if idx_rand[0] == idx_rand[1]:
            idx_rand = random.choice(indexes)

        wee[idx_rand[0]][idx_rand[1]] = 0.001

    return wee
Python
def excitatory_network_state(self, wee, wei, te, x, y, white_noise_e):
    """Activity of Excitatory neurons in the network"""

    xt = x[:, 1]
    xt = xt.reshape(self.ne, 1)
    yt = y[:, 1]
    yt = yt.reshape(self.ni, 1)

    incoming_drive_e = np.expand_dims(self.incoming_drive(weights=wee, activity_vector=xt), 1)
    incoming_drive_i = np.expand_dims(self.incoming_drive(weights=wei, activity_vector=yt), 1)

    tot_incoming_drive = incoming_drive_e - incoming_drive_i + white_noise_e + np.asarray(self.v_t) - te

    """Heaviside step function"""
    heaviside_step = [0] * len(tot_incoming_drive)
    for t in range(len(tot_incoming_drive)):
        heaviside_step[t] = 0.0 if tot_incoming_drive[t] < te[t] else 1.0

    xt_next = np.asarray(heaviside_step.copy())

    return xt_next
Python
def recurrent_drive(self, wee, wei, te, x, y, white_noise_e):
    """Network state due to the recurrent drive received by each unit at time t+1"""

    xt = x[:, 1]
    xt = xt.reshape(self.ne, 1)
    yt = y[:, 1]
    yt = yt.reshape(self.ni, 1)

    incoming_drive_e = np.expand_dims(self.incoming_drive(weights=wee, activity_vector=xt), 1)
    incoming_drive_i = np.expand_dims(self.incoming_drive(weights=wei, activity_vector=yt), 1)

    tot_incoming_drive = incoming_drive_e - incoming_drive_i + white_noise_e - te

    """Heaviside step function"""
    heaviside_step = [0] * len(tot_incoming_drive)
    for t in range(len(tot_incoming_drive)):
        heaviside_step[t] = 0.0 if tot_incoming_drive[t] < te[t] else 1.0

    xt_next = np.asarray(heaviside_step.copy())

    return xt_next
Python
def find_root_by_key(haystack, needle, return_key=None, _inherited_key=None, _depth=0, _return_value=None):
    '''
    Searches needle in haystack (which could be a dict, list, list of dicts, nested dicts, etc.)
    and returns the root key that has this needle somewhere within its children.

    :param haystack: dict, list
    :param needle: search key
    :param return_key: if this is given, then the result will be the root_key[return_key] instead of root_key
    :param _inherited_key: internal usage, do not pass this.
    :param _depth: internal usage, do not pass this.
    :param _return_value: internal usage, do not pass this.
    :return:
    '''
    found = list()
    if isinstance(haystack, dict):
        for key, value in haystack.items():
            if not _depth:
                _inherited_key = key
                _return_value = key if not return_key else haystack[_inherited_key].get(return_key, _inherited_key)
            if key.lower() == needle.lower():
                found.append(_return_value)
            else:
                found.extend(find_root_by_key(value, needle, return_key, _inherited_key, _depth + 1, _return_value))
    elif isinstance(haystack, list) and _inherited_key is not None:
        for value in haystack:
            found.extend(find_root_by_key(value, needle, return_key, _inherited_key, _depth + 1, _return_value))
    else:
        return []
    return list(set(found))
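A small illustrative call with made-up data (not from the source): it looks up which top-level entries contain a 'port' key anywhere below them.

services = {
    'web': {'meta': {'port': 80}, 'name': 'frontend'},
    'db': {'config': [{'port': 5432}], 'name': 'postgres'},
    'cache': {'ttl': 300, 'name': 'redis'},
}

print(find_root_by_key(services, 'PORT'))                     # ['web', 'db'] (order may vary; match is case-insensitive)
print(find_root_by_key(services, 'port', return_key='name'))  # ['frontend', 'postgres'] (order may vary)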
Python
def generate_gallery_rst(app):
    """Starts the gallery configuration and recursively scans the examples
    directory in order to populate the examples gallery
    """
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)

    if not plot_gallery:
        return

    gallery_conf.update(app.config.sphinxgallery_conf)

    # this assures I can call the config in other places
    app.config.sphinxgallery_conf = gallery_conf

    examples_dir = os.path.join(app.builder.srcdir, gallery_conf['examples_dir'])
    gallery_dir = os.path.join(app.builder.srcdir, gallery_conf['gallery_dir'])
    mod_examples_dir = os.path.join(app.builder.srcdir, gallery_conf['mod_example_dir'])

    for workdir in [examples_dir, gallery_dir, mod_examples_dir]:
        if not os.path.exists(workdir):
            os.makedirs(workdir)

    # we create an index.rst with all examples
    fhindex = open(os.path.join(gallery_dir, 'index.rst'), 'w')
    fhindex.write("""

.. _examples-index:

Gallery of Examples
===================

""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    seen_backrefs = set()
    generate_dir_rst('.', fhindex, examples_dir, gallery_dir, gallery_conf,
                     plot_gallery, seen_backrefs)
    for directory in sorted(os.listdir(examples_dir)):
        if os.path.isdir(os.path.join(examples_dir, directory)):
            generate_dir_rst(directory, fhindex, examples_dir, gallery_dir,
                             gallery_conf, plot_gallery, seen_backrefs)
    fhindex.flush()
Python
def check_niimg(niimg, atleast_4d=False):
    """Check that niimg is a proper 3D/4D niimg. Turn filenames into objects.

    Parameters
    ----------
    niimg: Niimg-like object
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.

    atleast_4d: boolean, optional
        Indicates if a 3d image should be turned into a single-scan 4d niimg.

    Returns
    -------
    result: 3D/4D Niimg-like object
        Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
        that the returned object has get_data() and get_affine() methods.

    Notes
    -----
    In nilearn, special care has been taken to make image manipulation easy.
    This method is a kind of pre-requisite for any data processing method in
    nilearn because it checks if data have a correct format and loads them if
    necessary.

    Its application is idempotent.
    """
    # If it's an iterator, it's a 4d image
    if hasattr(niimg, "__iter__") and not isinstance(niimg, _basestring):
        return check_niimg_4d(niimg)

    # Otherwise, it should be a filename or a SpatialImage, we load it
    niimg = load_niimg(niimg)

    if atleast_4d and len(niimg.shape) == 3:
        data = niimg.get_data().view()
        data.shape = data.shape + (1, )
        niimg = new_img_like(niimg, data, niimg.get_affine())

    return niimg
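A hedged example (my addition): it builds an in-memory 3D image with nibabel and promotes it to a single-scan 4D niimg. It assumes a nibabel/nilearn version where load_niimg, new_img_like and the get_data()/get_affine() methods used above are still available.

import numpy as np
import nibabel as nib

img_3d = nib.Nifti1Image(np.random.rand(4, 4, 4), affine=np.eye(4))
img_4d = check_niimg(img_3d, atleast_4d=True)
print(img_4d.shape)  # (4, 4, 4, 1)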
Python
def check_niimg_3d(niimg):
    """Check that niimg is a proper 3D niimg-like object and load it.

    Parameters
    ----------
    niimg: Niimg-like object
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data()
        and get_affine() methods are present, raise TypeError otherwise.

    Returns
    -------
    result: 3D Niimg-like object
        Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed
        that the returned object has get_data() and get_affine() methods.

    Notes
    -----
    In nilearn, special care has been taken to make image manipulation easy.
    This method is a kind of pre-requisite for any data processing method in
    nilearn because it checks if data have a correct format and loads them if
    necessary.

    Its application is idempotent.
    """
    niimg = load_niimg(niimg)
    shape = niimg.shape
    if len(shape) == 3:
        pass
    elif (len(shape) == 4 and shape[3] == 1):
        # "squeeze" the image.
        data = _safe_get_data(niimg)
        affine = niimg.get_affine()
        niimg = new_img_like(niimg, data[:, :, :, 0], affine)
    else:
        raise TypeError("A 3D image is expected, but an image "
                        "with a shape of %s was given." % (shape, ))

    return niimg
Python
def concat_niimgs(niimgs, dtype=np.float32, accept_4d=False,
                  auto_resample=False, verbose=0,
                  memory=Memory(cachedir=None), memory_level=0):
    """Concatenate a list of 3D/4D niimgs of varying lengths.

    The niimgs list can contain niftis/paths to images of varying dimensions
    (i.e., 3D or 4D) as well as different 3D shapes and affines, as they
    will be matched to the first image in the list if auto_resample=True.

    Parameters
    ----------
    niimgs: iterable of Niimg-like objects
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        Niimgs to concatenate.

    dtype: numpy dtype, optional
        the dtype of the returned image

    accept_4d: boolean, optional
        Accept 4D images

    auto_resample: boolean
        Converts all images to the space of the first one.

    verbose: int
        Controls the amount of verbosity (0 means no messages).

    memory : instance of joblib.Memory or string
        Used to cache the resampling process.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    memory_level : integer, optional
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching.

    Returns
    -------
    concatenated: nibabel.Nifti1Image
        A single image.
    """
    # get properties from first image
    try:
        first_niimg = check_niimg(next(iter(niimgs)), atleast_4d=True)
    except StopIteration:
        raise TypeError('Cannot concatenate empty objects')
    target_affine = first_niimg.get_affine()
    first_data = first_niimg.get_data()
    target_item_shape = first_niimg.shape[:3]  # skip 4th/time dimension

    # count how many images we have in all (might be list of different 4D's)
    lengths = []
    for index, niimg in enumerate(niimgs):
        this_shape = check_niimg(niimg).shape
        if len(this_shape) == 3:
            lengths.append(1)
        else:
            if not accept_4d:
                if (isinstance(niimg, _basestring)):
                    i_error = "Image " + niimg
                else:
                    i_error = "Image #" + str(index)
                raise ValueError("%s is a 4D shape (shape: %s), but this "
                                 "function accepts only 3D images"
                                 % (i_error, this_shape))
            lengths.append(this_shape[3])

    # Using fortran order makes concatenation much faster than with C order,
    # because the voxels for a given image are grouped together in memory.
    data = np.ndarray(target_item_shape + (sum(lengths), ),
                      order="F", dtype=dtype)

    data[..., :lengths[0]] = first_data
    cur_4d_index = 0
    for index, (iter_niimg, size) in enumerate(zip(niimgs, lengths)):
        # talk to user
        if (isinstance(iter_niimg, _basestring)):
            nii_str = "image " + iter_niimg
        else:
            nii_str = "image #" + str(index)
        if verbose > 0:
            print("Concatenating {0}/{1}: {2}".format(index + 1, sum(lengths),
                                                      nii_str))

        if index == 0:  # we have already loaded the first one
            cur_4d_index += size
            continue

        niimg = check_niimg(iter_niimg, atleast_4d=True)

        if (np.array_equal(niimg.get_affine(), target_affine) and
                target_item_shape == niimg.shape[:3]):
            this_data = niimg.get_data()
        else:
            if not auto_resample:
                raise ValueError("Affine of %s is different"
                                 " from reference affine"
                                 "\nReference affine:\n%r\n"
                                 "Wrong affine:\n%r"
                                 % (nii_str, target_affine,
                                    niimg.get_affine()))
            if verbose > 0:
                print("...resampled to first nifti!")
            from .. import image  # we avoid a circular import
            niimg = cache(image.resample_img, memory,
                          func_memory_level=2, memory_level=memory_level)(
                              niimg, target_affine=target_affine,
                              target_shape=target_item_shape)
            this_data = niimg.get_data()

        data[..., cur_4d_index:cur_4d_index + size] = this_data
        cur_4d_index += size

    return new_img_like(first_niimg, data, target_affine)
Python
def check_niimg_4d(niimgs, return_iterator=False):
    """Check that niimg is a proper 4D niimg-like object and load it.

    Parameters
    ----------
    niimgs: 4D Niimg-like object
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        If niimgs is an iterable, checks if data is really 4D. Then,
        considering that it is a list of niimg and load them one by one.
        If niimg is a string, consider it as a path to Nifti image and
        call nibabel.load on it. If it is an object, check if get_data
        and get_affine methods are present, raise an Exception otherwise.

    return_iterator: boolean
        If True, an iterator of 3D images is returned. This reduces the memory
        usage when `niimgs` contains 3D images.
        If False, a single 4D image is returned. When `niimgs` contains 3D
        images they are concatenated together.

    Returns
    -------
    niimg: 4D nibabel.Nifti1Image or iterator of 3D nibabel.Nifti1Image

    Notes
    -----
    This function is the equivalent to check_niimg_3d() for Niimg-like objects
    with a session level.

    Its application is idempotent.
    """
    # Two types of input:
    # * 4D nifti image
    # * list of 3D niimgs

    # We get rid of the 4D nifti image case, as it is the simplest case
    # Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug
    # See http://bugs.python.org/issue7624
    if isinstance(niimgs, _basestring) or not hasattr(niimgs, "__iter__"):
        niimgs = load_niimg(niimgs)
        shape = niimgs.shape
        if len(shape) != 4:
            raise TypeError(
                "Data must be a 4D Niimg-like object but you provided an "
                "image of shape %s. See "
                "http://nilearn.github.io/building_blocks/"
                "manipulating_mr_images.html#niimg." % (shape, ))
        if return_iterator:
            return (_index_niimgs(niimgs, i) for i in range(shape[3]))
        else:
            return niimgs

    # We now have 3 types of input:
    # * a true list
    # * an iterator
    # * a generator
    # To be iterator/generator friendly, we externalize the verifications in
    # an iterator.

    if return_iterator:
        return _iter_check_niimg_4d(niimgs)

    return concat_niimgs(niimgs)
Python
def split_multi_scale(y, y_shape):
    """Split data into 4 original multi_scale images"""
    yw, yh = y_shape

    # Index of original image
    split_index = [yw * yh]
    # Index of large image
    split_index.append(split_index[-1] + (yw - 1) * yh)
    # Index of tall image
    split_index.append(split_index[-1] + yw * (yh - 1))
    # Index of big image
    split_index.append(split_index[-1] + (yw - 1) * (yh - 1))

    # We split according to computed indices
    y_preds = np.split(y, split_index, axis=1)

    # y_pred is the original image
    y_pred = y_preds[0]

    # y_pred_tall is the image with 1x2 patch application. We have to make
    # some calculus to get it back in original shape
    height_tf_i = (np.eye(y_cols) + np.eye(y_cols, k=-1))[:, :y_cols - 1] * .5
    height_tf_i.flat[0] = 1
    height_tf_i.flat[-1] = 1
    y_pred_tall = [np.dot(height_tf_i, np.reshape(m, (yw - 1, yh))).flatten()
                   for m in y_preds[1]]
    y_pred_tall = np.asarray(y_pred_tall)

    # y_pred_large is the image with 2x1 patch application. We have to make
    # some calculus to get it back in original shape
    width_tf_i = (np.eye(y_cols) + np.eye(y_cols, k=1))[:y_cols - 1] * .5
    width_tf_i.flat[0] = 1
    width_tf_i.flat[-1] = 1
    y_pred_large = [np.dot(np.reshape(m, (yw, yh - 1)), width_tf_i).flatten()
                    for m in y_preds[2]]
    y_pred_large = np.asarray(y_pred_large)

    # y_pred_big is the image with 2x2 patch application. We use previous
    # matrices to get it back in original shape
    y_pred_big = [np.dot(np.reshape(m, (yw - 1, yh - 1)), width_tf_i)
                  for m in y_preds[3]]
    y_pred_big = [np.dot(height_tf_i, np.reshape(m, (yw - 1, yh))).flatten()
                  for m in y_pred_big]
    y_pred_big = np.asarray(y_pred_big)

    return (y_pred, y_pred_tall, y_pred_large, y_pred_big)
Python
def ecdf_plot(data, x=None, **kwargs):
    """
    Empirical plot for cumulative distribution function

    Parameters
    ----------
    data: array or list
        data for the empirical CDF plot
    x: array or list (optional)
        the points to show the plot
    **kwargs: **kwargs for matplotlib.plot

    Returns
    -------
    x: array
        sorted x
    ecdf_val: values of empirical cdf for x
    """
    data = np.sort(np.array(data))
    if x is None:
        x = data
    else:
        x = np.sort(np.array(x))
    ecdf_val = np.zeros(len(x))
    for i in range(len(x)):
        ecdf_val[i] = np.mean(data < x[i])
    plt.plot(x, ecdf_val, **kwargs)
    return x, ecdf_val
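A quick usage sketch (mine), assuming numpy and matplotlib.pyplot are imported as np and plt, as the function above expects:

import numpy as np

samples = np.random.normal(0, 1, 1000)
x, ecdf_val = ecdf_plot(samples, linestyle='--', label='empirical CDF')
# ecdf_val[i] is the fraction of samples strictly below x[i],
# so over the sorted data it climbs from 0 towards (n - 1) / n.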
Python
def read_tokens(self, path):
    """
    Reads the given file line by line and yields the list of tokens present
    in each line.

    :param path:
    :return:
    """
    raise NotImplementedError("Must implement read_tokens")
Python
def read_samples_by_string(self, path):
    """
    Reads the given file line by line and yields the word-form of each
    derived sample.

    :param path:
    :return:
    """
    raise NotImplementedError("Must implement read_samples")
Python
def is_unknown_token(self, token):
    """
    True if the given token is out of vocabulary

    :param token:
    :return:
    """
    return token not in self.token_2_id or token == self.unknown_token()
Python
def sentence_2_token_ids(self, sentence):
    """
    Convert a sentence to word ids

    :param sentence:
    :return:
    """
    return [self.convert_token_2_id(w) for w in sentence.split()]
Python
def token_ids_2_tokens(self, word_ids):
    """
    Convert a list of word ids to words

    :param word_ids:
    :return:
    """
    return [self.convert_id_2_token(w) for w in word_ids]
Python
def read_samples(self, path):
    """
    Read sample of path's data

    :param path:
    :return: generate list
    """
    for source_words, target_words in self.read_samples_by_string(path):

        source = [self.convert_token_2_id(w) for w in source_words]
        target = [self.convert_token_2_id(w) for w in target_words]
        target.append(EOS_ID)

        yield source, target
Python
def evaluate_accuracy(sess, model, data_reader, corrective_tokens, test_path,
                      max_samples=None):
    """Evaluates the accuracy and BLEU score of the given model."""

    import nltk  # Loading here to avoid having to bundle it in lambda.

    # Build a collection of "baseline" and model-based hypotheses, where the
    # baseline is just the (potentially errant) source sequence.
    baseline_hypotheses = defaultdict(list)  # The model's input
    model_hypotheses = defaultdict(list)  # The actual model's predictions
    targets = defaultdict(list)  # Groundtruth

    errors = []

    n_samples_by_bucket = defaultdict(int)
    n_correct_model_by_bucket = defaultdict(int)
    n_correct_baseline_by_bucket = defaultdict(int)
    n_samples = 0

    # Evaluate the model against all samples in the test data set.
    for source, target in data_reader.read_samples_by_string(test_path):

        matching_buckets = [i for i, bucket in enumerate(model.buckets) if
                            len(source) < bucket[0]]
        if not matching_buckets:
            continue

        bucket_id = matching_buckets[0]

        decoding = next(
            decode(sess, model, data_reader, [source],
                   corrective_tokens=corrective_tokens, verbose=False))
        model_hypotheses[bucket_id].append(decoding)
        if decoding == target:
            n_correct_model_by_bucket[bucket_id] += 1
        else:
            errors.append((decoding, target))

        baseline_hypotheses[bucket_id].append(source)
        if source == target:
            n_correct_baseline_by_bucket[bucket_id] += 1

        # nltk.corpus_bleu expects a list of one or more reference
        # translations per sample, so we wrap the target list in another list
        targets[bucket_id].append([target])

        n_samples_by_bucket[bucket_id] += 1
        n_samples += 1

        if max_samples is not None and n_samples > max_samples:
            break

    # Measure the corpus BLEU score and accuracy for the model and baseline
    # across all buckets.
    for bucket_id in targets.keys():
        baseline_bleu_score = nltk.translate.bleu_score.corpus_bleu(
            targets[bucket_id], baseline_hypotheses[bucket_id])
        model_bleu_score = nltk.translate.bleu_score.corpus_bleu(
            targets[bucket_id], model_hypotheses[bucket_id])
        print("Bucket {}: {}".format(bucket_id, model.buckets[bucket_id]))
        print("\tBaseline BLEU = {:.4f}\n\tModel BLEU = {:.4f}".format(
            baseline_bleu_score, model_bleu_score))
        print("\tBaseline Accuracy: {:.4f}".format(
            1.0 * n_correct_baseline_by_bucket[bucket_id] /
            n_samples_by_bucket[bucket_id]))
        print("\tModel Accuracy: {:.4f}".format(
            1.0 * n_correct_model_by_bucket[bucket_id] /
            n_samples_by_bucket[bucket_id]))

    return errors
Python
def scale_image(self, newImageW, newImageH):
    """Scale the image to match passed in target height and width"""
    self.x = int((newImageW / self.imageW) * self.x)
    self.y = int((newImageH / self.imageH) * self.y)
    self.w = int((newImageW / self.imageW) * self.w)
    self.h = int((newImageH / self.imageH) * self.h)
    self.imageH = newImageH
    self.imageW = newImageW

    # recalculate error values
    self.calculate_error()
Python
def xavier_initializer_convolution(shape, dist='uniform', lambda_initializer=True):
    """
    Xavier initializer for N-D convolution patches. input_activations = patch_volume * in_channels;
    output_activations = patch_volume * out_channels;
    Uniform: lim = sqrt(6/(input_activations + output_activations))
    Normal: stddev = sqrt(3/(input_activations + output_activations))
    :param shape: The shape of the convolution patch i.e. spatial_shape + [input_channels, output_channels]. The order
    of input_channels and output_channels is irrelevant, hence this can be used to initialize deconvolution parameters.
    :param dist: A string either 'uniform' or 'normal' determining the type of distribution
    :param lambda_initializer: Whether to return the initial actual values of the parameters (True) or placeholders
    that are initialized when the session is initiated
    :return: A numpy array with the initial values for the parameters in the patch
    """
    s = len(shape) - 2
    num_activations = np.prod(shape[:s]) * np.sum(shape[s:])  # input_activations + output_activations
    if dist == 'uniform':
        lim = np.sqrt(6. / num_activations)
        if lambda_initializer:
            return np.random.uniform(-lim, lim, shape).astype(np.float32)
        else:
            return tf.random_uniform(shape, minval=-lim, maxval=lim)
    if dist == 'normal':
        stddev = np.sqrt(3. / num_activations)
        if lambda_initializer:
            return np.random.normal(0, stddev, shape).astype(np.float32)
        else:
            return tf.truncated_normal(shape, mean=0, stddev=stddev)
    raise ValueError('Distribution must be either "uniform" or "normal".')
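A usage sketch (my addition, not from the source): it takes the NumPy branch of the initializer (lambda_initializer=True) to build a 3-D convolution kernel variable; the kernel shape is purely illustrative.

import tensorflow as tf

kernel_shape = [5, 5, 5, 1, 16]  # 5x5x5 patches, 1 input channel, 16 output channels
init_values = xavier_initializer_convolution(kernel_shape, dist='uniform')
kernel = tf.Variable(init_values, name='conv_kernel')
print(kernel.shape)  # (5, 5, 5, 1, 16)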
Python
def create_dictionary(filename):
    """Takes a string representing the name of a text file and returns a
       word-transition (Markov chain) dictionary mapping each word to the list
       of words that follow it in the file ('$' marks the start of a sentence).
    """
    with open(filename, 'r') as file:
        text = file.read()
    words = text.split()
    d = {}
    current_word = '$'
    for next_word in words:
        if current_word not in d:
            d[current_word] = [next_word]
        else:
            d[current_word] += [next_word]
        if next_word[-1] in '.!?':
            current_word = '$'
        else:
            current_word = next_word
    return d
Python
def generate_text(word_dict, num_words):
    """Takes as parameters a dictionary of word transitions (generated by the
       create_dictionary function) named word_dict and a positive integer named
       num_words, and prints num_words words of randomly generated text based
       on those transitions.
    """
    current_word = '$'
    s = ''
    while num_words > 0:
        next_word = random.choice(word_dict[current_word])
        s += next_word
        num_words -= 1
        if num_words > 0:
            s += ' '
        if next_word[-1] in '.!?':
            current_word = '$'
        else:
            current_word = next_word
    print(s)
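A short end-to-end sketch of the two functions above ('corpus.txt' is a hypothetical file name; assumes both functions and the random module are available):

import random

random.seed(0)  # make the generated text reproducible
word_dict = create_dictionary('corpus.txt')  # hypothetical input file
generate_text(word_dict, 25)  # prints 25 randomly chosen words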
Python
def tv_meta(path, dirname):
    """This just assumes the folder name is the title :>"""
    pattern = re.compile(r'^[^\(]+', re.U)
    match = re.search(pattern, dirname)
    title = match.group().strip()
    print('Using fixed title: {}'.format(title))
    omdb_series = omdbapi.search(title)
    omdb_imdbid = omdb_series.get('imdbID')
    print('Found series IMDB id: {}'.format(omdb_imdbid))
    tvdb_series = thetvdb.remote_id(omdb_imdbid)
    print('Got TVDB result: {}'.format(len(tvdb_series)))
    tvdb_imdbid = ''
    tvdbid = ''
    if len(tvdb_series) != 0:
        tvdb_imdbid = tvdb_series[0].find('IMDB_ID').text
        tvdbid = tvdb_series[0].find('id').text
        print('Found series TVDB id: {}'.format(tvdbid))
    if tvdb_imdbid != omdb_imdbid:
        print('The imdb ids do not match: Name: {}, OMDB: {}, TVDB: {}'
              .format(title, omdb_imdbid, tvdb_imdbid))
    return dict(path=path, dirname=dirname, title=dirname,
                imdbid=omdb_imdbid, tvdbid=tvdbid)
Python
def start_params(self):
    """
    Get the start parameters for the fit

    Returns
    -------
    names, values : tuple
        names gives the parameter names and values gives the starting values
        for all the submodels with non-fixed priors
    """
    mod = self.physics_model
    names = []
    values = []
    for ckey in mod.keys():
        if mod[ckey]["prior"]["name"] != "fixed":
            for cparam in mod[ckey]["varnames"]:
                if (
                    len(np.atleast_1d(mod[ckey][cparam])) > 1
                ):  # expand into multiple parameters
                    for ll, cval in enumerate(mod[ckey][cparam]):
                        names.append(f"{ckey}_{cparam}{ll+1}")
                        values.append(cval)
                else:
                    names.append(f"{ckey}_{cparam}")
                    values.append(mod[ckey][cparam])

    return (names, values)
Python
def lnlike(self, phi, star_lnpgriddata, beast_moddata):
    """
    Compute the log(likelihood) for the ensemble parameters

    Parameters
    ----------
    phi: floats
        ensemble parameters

    star_lnpgriddata: dictionary
        contains arrays of the likelihood*grid_weight values and
        indices to the BEAST model grid

    beast_moddata: dictionary
        contains arrays of the beast parameters for the full beast physics grid

    Returns
    -------
    log(likelihood): float
    """
    # update the values in the physics model using the input ensemble parameters
    k = 0
    cur_physmod = np.full((len(beast_moddata["Av"])), 1.0, dtype=float)
    for cparam in self.params:
        cmod = self.physics_model[cparam]
        if cmod["prior"]["name"] != "fixed":
            for cvar in cmod["varnames"]:
                if cmod["nsubvars"] > 1:
                    for j in range(cmod["nsubvars"]):
                        self.physics_model[cparam]["values"][j] = phi[k]
                        k += 1
                else:
                    self.physics_model[cparam][cvar] = phi[k]
                    k += 1

        # compute the physics model for the full BEAST physics grid
        cur_physmod *= self.physics_model[cparam]["model"](
            beast_moddata[cparam]
        )

        # if cparam == "logA":
        #     print(self.physics_model[cparam]["values"])

    n_lnps, n_stars = star_lnpgriddata["indxs"].shape

    if self.compute_N_stars:
        # compute the mass multipliers for each age and metallicity
        if self.compute_massmult:
            self.massmultipliers = precompute_mass_multipliers(
                beast_moddata, self.physics_model["M_ini"]["model"]
            )
            print("once")
            # only compute the massmultipliers the 1st time if the IMF is fixed
            #   saves computation time
            if self.physics_model["M_ini"]["prior"]["name"] == "fixed":
                self.compute_massmult = False

        # compute the expected number of stars based on the current physics model
        pred_stars = get_predicted_num_stars(
            self.massmultipliers,
            beast_moddata,
            cur_physmod,
            self.physics_model["logA"]["model"],
        )

        # calculate the probability of the observed number of stars
        #   ln form based on equation 8 in Weisz et al. (2013, ApJ, 762, 123)
        logintprob = (
            n_stars * np.log(pred_stars)
            - pred_stars
            - scipy.special.gammaln(n_stars + 1)
        )
        # print(pred_stars, n_stars, logintprob)
    else:
        logintprob = 0.0

    # compute each star's integrated probability that it fits the new model
    # including the completeness function
    for i in range(n_stars):
        # mask for the star's finite lnpgrid values
        gmask = np.isfinite(star_lnpgriddata["vals"][i])
        # indices for the star's lnpgrid values in the full beast grid
        curindxs = (star_lnpgriddata["indxs"][i])[gmask]

        # compute the integrated probability
        star_intprob = np.sum(
            (star_lnpgriddata["vals"][i])[gmask]
            * cur_physmod[curindxs]
            * beast_moddata["completeness"][curindxs]
        )
        # checks for spoilers
        if not np.isfinite(star_intprob):
            raise ValueError("individual integrated star prob is not finite")
        if star_intprob == 0.0:
            raise ValueError("individual integrated star prob is zero")

        logintprob += np.log(star_intprob)

    return logintprob
Python
def lnprior(self, phi):
    """
    Compute the log(priors) for the ensemble parameters

    Parameters
    ----------
    phi: floats
        ensemble parameters

    Returns
    -------
    log(prior): float
        0 if allowed
        -infinity if not allowed
    """
    cmod = self.physics_model
    k = 0
    for cparam in cmod.keys():
        cprior = cmod[cparam]["prior"]
        cname = cprior["name"]
        if cname != "fixed":
            if cname == "flat":
                for i, vparam in enumerate(cmod[cparam]["varnames"]):
                    if cmod[cparam]["nsubvars"] > 1:
                        for j in range(len(np.atleast_1d(cprior["minmax"][i]))):
                            if (
                                phi[k] < cprior["minmax"][0][j]
                                or phi[k] > cprior["minmax"][1][j]
                            ):
                                return -np.inf
                            k += 1
                    else:
                        if (
                            phi[k] < cprior["minmax"][i][0]
                            or phi[k] > cprior["minmax"][i][1]
                        ):
                            return -np.inf
                        k += 1
            else:
                raise ValueError(f"{cname} prior not supported")

    return 0.0
Python
def lnprob(phi, megabeast_model, star_lnpgriddata, beast_moddata):
    """
    Compute the log(probability) for the ensemble parameters

    Parameters
    ----------
    phi: floats
        ensemble parameters

    megabeast_model : class
        MegaBEAST physical model including priors

    star_lnpgriddata: dictionary
        contains arrays of the likelihood*grid_weight values and
        indices to the BEAST model grid

    beast_moddata: dictionary
        contains arrays of the beast parameters for the full beast physics grid

    Returns
    -------
    log(probability): float
    """
    ln_prior = megabeast_model.lnprior(phi)

    if not np.isfinite(ln_prior):
        return -np.inf
    return ln_prior + megabeast_model.lnlike(phi, star_lnpgriddata, beast_moddata)
Python
def _get_best_fit_params(sampler): """ Determine the best fit parameters given an emcee sampler object """ # very likely a faster way max_lnp = -1e6 nwalkers, nsteps = sampler.lnprobability.shape for k in range(nwalkers): tmax_lnp = np.nanmax(sampler.lnprobability[k]) if tmax_lnp > max_lnp: max_lnp = tmax_lnp (indxs,) = np.where(sampler.lnprobability[k] == tmax_lnp) fit_params_best = sampler.chain[k, indxs[0], :] return fit_params_best
Python
def fit_ensemble(megabeast_model, star_lnpgriddata, beast_moddata):
    """
    Run the MegaBEAST on a single set of BEAST results.

    Parameters
    ----------
    megabeast_model : class
        MegaBEAST physical model including priors

    star_lnpgriddata: dictionary
        contains arrays of the likelihood*grid_weight values and
        indices to the BEAST model grid

    beast_moddata: dictionary
        contains arrays of the beast parameters for the full beast physics grid

    Returns
    -------
    fit_results : array
        set of best fit parameters
    """
    # standard minimization to find initial values
    def chi2(*args):
        return -1.0 * lnprob(*args)

    sparams = megabeast_model.start_params()[1]

    # result = op.minimize(
    # result = op.least_squares(
    #     chi2,
    #     sparams,
    #     args=(megabeast_model, star_lnpgriddata, beast_moddata),
    #     ftol=1e-20,
    #     xtol=1e-20
    #     method="Nelder-Mead",
    # )

    ndim, nwalkers = len(sparams), 5 * len(sparams)

    pos = sparams * (1.0 + 1e-1 * np.random.randn(nwalkers, ndim))
    sampler = emcee.EnsembleSampler(
        nwalkers, ndim, lnprob, args=(megabeast_model, star_lnpgriddata, beast_moddata)
    )
    nsteps = 100
    sampler.run_mcmc(pos, nsteps, progress=True)
    # samples = sampler.get_chain()

    # next step would be to
    # run through MCMC to fully sample likelihood
    # maybe include option not to run MCMC

    # print("output")
    # print(megabeast_model.physics_model)
    # print(result)

    return _get_best_fit_params(sampler)
    # return result["x"]
Python
def precompute_mass_multipliers(bphysparams, physmodmass):
    """
    Calculate the value to multiply the SFR by to get the total mass in stars
    at all ages and masses given the physics model on the BEAST model grid.

    Parameters
    ----------
    bphysparams : astropy.table
        table giving the physical parameters, weights, and completeness on
        the BEAST physical grid

    physmodmass : beast.physicmodel.priormodel
        physics model for the initial mass

    Returns
    -------
    sfhmassinfo : dict
        "massmult" gives the value to multiply the SFH by at each age and
        metallicity, "ages" gives the ages and "Zs" gives the metallicities
    """
    mass_range = [min(bphysparams["M_ini"]), max(bphysparams["M_ini"])]

    # compute the total mass and average mass of a star given the mass_prior_model
    nmass = 100
    masspts = np.logspace(np.log10(mass_range[0]), np.log10(mass_range[1]), nmass)
    massprior = physmodmass(masspts)
    totmass = np.trapz(massprior, masspts)
    avemass = np.trapz(masspts * massprior, masspts) / totmass

    # loop over all the ages and compute the mass to simulate
    # ***need to add metallicity as well***
    grid_ages = np.unique(bphysparams["logA"])
    bin_boundaries = compute_bin_boundaries(grid_ages)
    bin_widths = np.diff(10 ** (bin_boundaries))
    grid_mets = np.unique(bphysparams["Z"])

    massmults = np.full((len(grid_ages), len(grid_mets)), 0.0)
    for i, cage in enumerate(grid_ages):
        for j, cmet in enumerate(grid_mets):
            gmods = (bphysparams["logA"] == cage) & (bphysparams["Z"] == cmet)
            cur_mass_range = [
                min(bphysparams["M_ini"][gmods]),
                max(bphysparams["M_ini"][gmods]),
            ]
            gmass = (masspts >= cur_mass_range[0]) & (masspts <= cur_mass_range[1])
            curmasspts = masspts[gmass]
            curmassprior = massprior[gmass]
            totcurmass = np.trapz(curmassprior, curmasspts)

            # compute the mass remaining at each age -> this is the mass to simulate
            massmults[i, j] = bin_widths[i] * totcurmass / totmass

    return {
        "massmult": massmults,
        "ages": grid_ages,
        "Zs": grid_mets,
        "avemass": avemass,
    }
Python
def create_naive_maps(stats_filename, pix_size=10.0, verbose=False):
    """
    Make the naive maps by directly averaging the BEAST results for all the
    stars in each pixel.  Does not account for completeness, hence naive maps!

    Parameters
    ----------
    stats_filename : string or list of strings
        name(s) of the catalog(s) of BEAST results

    pix_size : float (default=10)
        size of pixels/regions in arcsec

    verbose : boolean (default=False)
        print extra info
    """
    # type of statistic (make a commandline parameter later)
    #   remember to add to output filenames
    stat_type = "Exp"

    # read in the full brick catalog
    if type(stats_filename) == str:
        stats_filename = [stats_filename]
    cat = Table.read(stats_filename[0])
    if len(stats_filename) > 1:
        for fname in stats_filename[1:]:
            tcat = Table.read(fname)
            cat = vstack([cat, tcat])

    # make RA/Dec grid
    ra = cat["RA"]
    dec = cat["DEC"]
    pixsize_degrees = pix_size / 3600
    n_x, n_y, ra_delt, dec_delt = calc_nx_ny_from_pixsize(cat, pixsize_degrees)
    # the ra spacing needs to be larger, as 1 degree of RA ==
    #   cos(DEC) degrees on the great circle
    ra_grid = ra.min() + ra_delt * np.arange(0, n_x + 1, dtype=float)
    dec_grid = dec.min() + dec_delt * np.arange(0, n_y + 1, dtype=float)

    # generate the wcs info for the output FITS files
    w = make_wcs_for_map(ra_grid, dec_grid)

    # get the pixel coordinates for each source
    pix_x, pix_y = get_pix_coords(cat, w)

    # for ease of checking the bin, set x/y coords to integers
    x = np.floor(pix_x)
    y = np.floor(pix_y)

    # setup array to store summary stats per pixel
    sum_stats = ["Av", "Rv", "f_A"]
    n_sum = len(sum_stats)
    summary_stats = np.zeros((n_y + 1, n_x + 1, n_sum + 1), dtype=float)
    summary_sigmas = np.zeros((n_y + 1, n_x + 1, n_sum), dtype=float)
    values_foreach_pixel = {
        cur_stat: {(i, j): [] for i in range(n_x + 1) for j in range(n_y + 1)}
        for cur_stat in sum_stats
    }

    # loop through the pixels and generate the summary stats
    for i in range(n_x + 1):
        for j in range(n_y + 1):
            (tindxs,) = np.where((x == i) & (y == j))
            # tindxs, = np.where((x == i) & (y == j) & (cat['chi2min'] < 10.))
            if len(tindxs) > 0:
                summary_stats[j, i, n_sum] = len(tindxs)
                if verbose:
                    print(i, j, len(tindxs))
                for k, cur_stat in enumerate(sum_stats):
                    values = cat[cur_stat + "_" + stat_type][tindxs]
                    values_foreach_pixel[cur_stat][i, j] = values
                    summary_stats[j, i, k] = np.average(values)
                    summary_sigmas[j, i, k] = np.std(values, ddof=1) / math.sqrt(
                        len(values)
                    )

    master_header = w.to_header()
    # Now, write the maps to disk
    for k, cur_stat in enumerate(sum_stats):
        map_name = stats_filename[0].replace("stats", "map" + cur_stat)
        hdu = fits.PrimaryHDU(summary_stats[:, :, k], header=master_header)
        hdu.writeto(map_name, overwrite=True)

        sigma_name = map_name.replace("map", "map_sigma")
        hdu_sigma = fits.PrimaryHDU(summary_sigmas[:, :, k], header=master_header)
        hdu_sigma.writeto(sigma_name, overwrite=True)

    hdu = fits.PrimaryHDU(summary_stats[:, :, n_sum], header=master_header)
    hdu.writeto(stats_filename[0].replace("stats", "npts"), overwrite=True)

    # And store all the values in HDF5 format
    values_name = stats_filename[0].replace("stats.fits", "values_per_pixel.hd5")
    f = h5py.File(values_name, "w")
    dt = h5py.special_dtype(vlen=np.dtype(float))
    for cur_stat in sum_stats:
        dset = f.create_dataset(cur_stat, (n_x, n_y), dtype=dt)
        for i, j in it.product(range(n_x), range(n_y)):
            dset[i, j] = values_foreach_pixel[cur_stat][i, j]
    f.close()
Python
def plot_graphic_model(gtype="text", savefig="png"): """ Plot the graphical model of the BEAST. Parameters ---------- gtype : str [default="text"] "text" for a verbose version, "math" for a compact version savefig : str set to the file extension of desired file to save image of model """ nodes = { "pIMF": ("IMF prior", "pIMF"), "IMF": ("Initial\nMass\nFunction", "<IMF(M<SUB>l</SUB>, M<SUB>h</SUB>, slope(s))>"), "pSFH": ("SFH prior", "pSFH"), "SFH": ("Star\nFormation\nHistory", "SFH(t)"), "pAMR": ("AMR prior", "pAMR"), "AMR": ("Age\nMetallicity\nRelation", "AMR(Z, t)"), "pDP": ("Distance prior", "pD"), "DP": ("Distance", "D(d)"), "pFC": ("prior\nForeground\nDust", "<pFD>"), "FC": ("Foreground\nDust", "<FD(A(V), R(V), f<SUB>A</SUB>)>"), "pGC": ("prior\nInternal\nDust", "<pID>"), "GC": ("Internal\nDust", "<ID(A(V), R(V), f<SUB>A</SUB>)>"), "M": ("mass\nM", "M"), "t": ("age\nT", "t"), "Z": ("metallicity\nZ", "Z"), "d": ("distance\nd", "d"), "Av": ("dust column\nA(V)", "A(V)"), "Rv": ("grain size\nR(V)", "R(V)"), "fA": ("<f<SUB>A</SUB>>", "<f<SUB>A</SUB>>"), "C": ("Completeness", "C(&theta;)"), "Cont": ("Contaminants\n&alpha;", "&alpha;"), "Phys": ("MegaBEAST\nPhysics+Observation Model", "p(&theta;|&phi;)p(&phi;)"), "Like": ("BEAST\nLikelihoods", "<BEAST L(F<SUB>D</SUB>|&theta;)>"), } edges = { "pIMF": "IMF", "IMF": "M", "pSFH": "SFH", "SFH": ("M", "t"), "AMR": ("Z", "t"), "pAMR": "AMR", "pDP": "DP", "DP": "d", "pFC": "FC", "FC": ("Av", "Rv", "fA"), "pGC": "GC", "GC": ("Av", "Rv", "fA"), "M": "Phys", "t": "Phys", "Z": "Phys", "d": "Phys", "Av": "Phys", "Rv": "Phys", "fA": "Phys", "C": "Phys", "Phys": "Like", "Cont": "Like", } beast = create_graphic_model(nodes, edges, gtype) beast.render(f"megabeast-graphic-{gtype}", format=savefig)
Python
def fit_ensemble(beast_data, lnp_filename, beast_priormodel, nstars_expected=None): """ Run the MegaBEAST on a single set of BEAST results. Parameters ---------- beast_data : dict information about the BEAST runs including SED grid and noise model lnp_filename : string file with posteriors from BEAST fitting beast_priormodel : dict dictionary of the BEAST prior model information nstars_expected : int number of stars expected, used as a check Returns ------- fit_results : array set of best fit parameters """ # get the saved sparse likelihoods lnp_data = read_lnp_data(lnp_filename, nstars=nstars_expected, shift_lnp=True) # get the completeness and BEAST model parameters for the # same grid points as the sparse likelihoods lnp_grid_vals = get_lnp_grid_vals(beast_data, lnp_data) # compute the BEAST prior weights # needed so the BEAST posteriors updated with the MegaBEAST model # ***currently only AV ensemble model supported*** avs = lnp_grid_vals["Av"] rvs = [3.1] # beast_data['Rv'] fAs = [1.0] # beast_data['f_A'] beast_dust_priors = PriorWeightsDust( avs, beast_priormodel["AV"], rvs, beast_priormodel["RV"], fAs, beast_priormodel["fA"], ) # standard minimization to find initial values def chi2(*args): return -1.0 * lnprob(*args) result = op.minimize( chi2, [0.25, 2.0, 0.5, 0.5, 1], args=(beast_dust_priors, lnp_data, lnp_grid_vals), method="Nelder-Mead", ) # next step would be to # run through MCMC to fully sample likelihood # maybe include option not to run MCMC return result["x"]
Python
def megabeast_single(): """ Run the MegaBEAST on a single set of BEAST results. """ pass
Python
def megabeast_image(megabeast_input_file, verbose=True): """ Run the MegaBEAST on an image of BEAST results. The BEAST results are given as spatially-reordered BEAST outputs with a file of lnp results for each pixel in the image. Parameters ---------- megabeast_input_file : string Name of the file that contains settings, filenames, etc verbose : boolean (default=True) print extra info """ # read in the settings from the file params = read_input(megabeast_input_file) # use nstars image to setup for each pixel nstars_image, nstars_header = fits.getdata(params["nstars_filename"], header=True) n_x, n_y = nstars_image.shape # read in the beast data that is needed by all the pixels beast_data = {} # - SED data beast_data.update(read_sed_data(params["beast_seds_filename"], param_list=["Av"])) # - max completeness beast_data.update( read_noise_data(params["beast_noise_filename"], param_list=["completeness"],) ) # completeness from toothpick model so n band completeness values # require only 1 completeness value for each model # max picked (may not be correct) beast_data["completeness"] = np.max(beast_data["completeness"], axis=1) # BEAST prior model beast_pmodel = {} beast_pmodel["AV"] = params["av_prior_model"] beast_pmodel["RV"] = params["rv_prior_model"] beast_pmodel["fA"] = params["fA_prior_model"] # setup for output pixel_fit_status = np.full((n_x, n_y), False, dtype=bool) n_fit_params = len(params["fit_param_names"]) best_fit_images = np.zeros((n_x, n_y, n_fit_params), dtype=float) + np.nan # loop over the pixels with non-zero entries in the nstars image for i in trange(n_x, desc="x pixels"): for j in trange(n_y, desc="y pixels", leave=False): if nstars_image[i, j] >= params["min_for_fit"]: pixel_fit_status[i, j] = True # filename with saved BEAST posteriors lnp_prefix = params["lnp_file_prefix"] lnp_filename = f"{lnp_prefix}_{j}_{i}_lnp.hd5" best_fit_params = fit_ensemble( beast_data, lnp_filename, beast_pmodel, nstars_expected=nstars_image[i, j], ) best_fit_images[i, j, :] = best_fit_params # output results (* = future) # - best fit # - *megabeast parameter 1D pPDFs # - *MCMC chain # Write the maps to disk master_header = nstars_header # check that the directory exists dpath = "./%s_megabeast/" % (params["projectname"]) if not os.path.exists(dpath): os.makedirs(dpath) for k, cname in enumerate(params["fit_param_names"]): hdu = fits.PrimaryHDU(best_fit_images[:, :, k], header=master_header) hdu.writeto( "%s_megabeast/%s_%s_bestfit.fits" % (params["projectname"], params["projectname"], cname), overwrite=True, )
Python
def logdet_marg(self, sPc_tril, sPr_tril, N): """Merit correction of state marginalization.""" sPc = tril_mat(sPc_tril) sPr = tril_mat(sPr_tril) log_det_sPc = sum(sympy.log(d) for d in sPc.diagonal()) log_det_sPr = sum(sympy.log(d) for d in sPr.diagonal()) return (N - 1) * log_det_sPr + log_det_sPc
Python
def logdet_marg(self, sPc_tril, sPr_tril, N): """Merit correction of state marginalization.""" sPc = tril_mat(sPc_tril) sPr = tril_mat(sPr_tril) log_det_sPc = sum(sympy.log(d) for d in sPc.diagonal()) log_det_sPr = sum(sympy.log(d) for d in sPr.diagonal()) return (N-1) * log_det_sPr + log_det_sPc
Python
def drive(self, speed, rotation_speed, tm_diff): """Call this from your :func:`PhysicsEngine.update_sim` function. Will update the robot's position on the simulation field. You can either calculate the speed & rotation manually, or you can use the predefined functions in :mod:`pyfrc.physics.drivetrains`. The outputs of the `drivetrains.*` functions should be passed to this function. .. note:: The simulator currently only allows 2D motion :param speed: Speed of robot in ft/s :param rotation_speed: Clockwise rotational speed in radians/s :param tm_diff: Amount of time speed was traveled (this is the same value that was passed to update_sim) """ # if the robot is disabled, don't do anything if not self.robot_enabled: return distance = speed * tm_diff angle = rotation_speed * tm_diff x = distance * math.cos(angle) y = distance * math.sin(angle) self.distance_drive(x, y, angle)
Python
def vector_drive(self, vx, vy, vw, tm_diff): """Call this from your :func:`PhysicsEngine.update_sim` function. Will update the robot's position on the simulation field. This moves the robot using a velocity vector relative to the robot instead of by speed/rotation speed. :param vx: Speed in x direction relative to robot in ft/s :param vy: Speed in y direction relative to robot in ft/s :param vw: Clockwise rotational speed in rad/s :param tm_diff: Amount of time speed was traveled """ # if the robot is disabled, don't do anything if not self.robot_enabled: return angle = vw * tm_diff vx = vx * tm_diff vy = vy * tm_diff x = vx * math.sin(angle) + vy * math.cos(angle) y = vx * math.cos(angle) + vy * math.sin(angle) self.distance_drive(x, y, angle)
Python
def distance_drive(self, x, y, angle): """Call this from your :func:`PhysicsEngine.update_sim` function. Will update the robot's position on the simulation field. This moves the robot some relative distance and angle from its current position. :param x: Feet to move the robot in the x direction :param y: Feet to move the robot in the y direction :param angle: Radians to turn the robot """ with self._lock: self.vx += x self.vy += y self.angle += angle c = math.cos(self.angle) s = math.sin(self.angle) self.x += x * c - y * s self.y += x * s + y * c self._update_gyros(angle)
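A standalone sketch of the coordinate update performed by distance_drive (pure arithmetic, no simulator state): a robot-relative displacement (x, y) is rotated by the robot's current heading before being added to the field position.

import math

# assume the robot is at field position (5.0, 3.0) facing 90 degrees (pi/2 rad)
field_x, field_y, heading = 5.0, 3.0, math.pi / 2

# robot-relative move: 1 ft forward (x), 0 ft sideways (y)
dx, dy = 1.0, 0.0

c, s = math.cos(heading), math.sin(heading)
field_x += dx * c - dy * s
field_y += dx * s + dy * c
print(round(field_x, 3), round(field_y, 3))  # 5.0 4.0 -> "forward" now points along +y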
Python
def unwrap_model(model: torch.nn.Module) -> torch.nn.Module: """ Recursively unwraps a model from potential containers (as used in distributed training). Args: model (:obj:`torch.nn.Module`): The model to unwrap. """ # since there could be multiple levels of wrapping, unwrap recursively if hasattr(model, "module"): return unwrap_model(model.module) else: return model
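A quick check of the unwrapping behaviour (assumes torch is installed; DataParallel stores the wrapped model in its .module attribute, which is what the recursion follows):

import torch

inner = torch.nn.Linear(4, 2)
wrapped = torch.nn.DataParallel(torch.nn.DataParallel(inner))  # two levels of wrapping
assert unwrap_model(wrapped) is inner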
Python
def _switch_replace_neg(candidate, h_ent_id, h_ent_str, t_ent_id, t_ent_str, rep_pairs: Dict[int, str] = None): """ Enumerate all the possible triplets and replace. """ entities = candidate["ent"] non_target_ent_ids = [ent_id for ent_id in entities if ent_id not in [h_ent_id, t_ent_id]] h_t_ent_ids = [h_ent_id, t_ent_id] assert h_ent_id in entities and t_ent_id in entities str_map = { h_ent_id: h_ent_str, t_ent_id: t_ent_str } ent_name_set = { ent_id: set([_mention["name"] for _mention in entities[ent_id].values()]) for ent_id in entities } if rep_pairs is None: rep_pairs = {} # Currently, we only sample exactly one non-target neg_res = [] for _non_tgt in non_target_ent_ids: _non_tgt_str = get_ent_str(candidate, _non_tgt) str_map[_non_tgt] = _non_tgt_str _source = h_t_ent_ids + [_non_tgt] _target = h_t_ent_ids + [_non_tgt] # The ``get_all_permutation`` function ensure that the obtained permutations # are **all** not the same with the initial permutation. _all_perm = get_all_permutation(_target) if _permutation_sample_num < len(_all_perm): _perm_sample_ls = random.sample(_all_perm, _permutation_sample_num) else: _perm_sample_ls = _all_perm for _perm in _perm_sample_ls: assert len(_perm) == len(_source) assert _perm != _source _rep_pairs_copy = copy.deepcopy(rep_pairs) _same_n = 0 for _src, _tgt in zip(_source, _perm): _rep_pairs_copy[_src] = rep_pairs[_tgt] if _tgt in rep_pairs else str_map[_tgt] if _rep_pairs_copy[_src].lower() == str_map[_src].lower() or _rep_pairs_copy[_src].lower() in ent_name_set[_src]: _same_n += 1 if _same_n == len(_source): continue neg_res.append(_replace_entities_w_str(candidate, _rep_pairs_copy)) return neg_res
Python
def masked_layer_norm(layer_norm_layer, inputs, mask=None):
    """
    Masked LayerNorm which will apply a mask over the output of LayerNorm to avoid inaccurate updates to the LayerNorm module.

    Args:
        layer_norm_layer (:obj:`~DeBERTa.deberta.LayerNorm`): LayerNorm module or function
        inputs (:obj:`torch.tensor`): The input tensor
        mask (:obj:`torch.IntTensor`): The mask to be applied on the output of LayerNorm where `0` indicates that
            the output of that element will be ignored, i.e. set to `0`

    Example::

        # Create a tensor b x n x d
        x = torch.randn([1,10,100])
        m = torch.tensor([[1,1,1,0,0,0,0,0,0,0]], dtype=torch.int)
        LayerNorm = DeBERTa.deberta.LayerNorm(100)
        y = MaskedLayerNorm(LayerNorm, x, m)

    """
    output = layer_norm_layer(inputs).to(inputs)
    if mask is None:
        return output
    if mask.dim() != inputs.dim():
        if mask.dim() == 4:
            mask = mask.squeeze(1).squeeze(1)
        mask = mask.unsqueeze(2)
    mask = mask.to(output.dtype)
    return output * mask
Python
def _switch_replace_neg(candidate, h_ent_id, h_ent_str, t_ent_id, t_ent_str, rep_pairs: Dict[int, str] = None): """ Enumerate all the possible triplets and replace. """ entities = candidate["ent"] non_target_ent_ids = [ent_id for ent_id in entities if ent_id not in [h_ent_id, t_ent_id]] h_t_ent_ids = [h_ent_id, t_ent_id] # assert h_ent_id in entities and t_ent_id in entities # FIXME: Comment this line if ``random_ht == True`` str_map = { h_ent_id: h_ent_str, t_ent_id: t_ent_str } ent_name_set = { ent_id: set([_mention["name"] for _mention in entities[ent_id].values()]) for ent_id in entities } if rep_pairs is None: rep_pairs = {} # Currently, we only sample exactly one non-target neg_res = [] for _non_tgt in non_target_ent_ids: _non_tgt_str = get_ent_str(candidate, _non_tgt) str_map[_non_tgt] = _non_tgt_str _source = h_t_ent_ids + [_non_tgt] _target = h_t_ent_ids + [_non_tgt] # The ``get_all_permutation`` function ensure that the obtained permutations # are **all** not the same with the initial permutation. _all_perm = get_all_permutation(_target) if _permutation_sample_num < len(_all_perm): _perm_sample_ls = random.sample(_all_perm, _permutation_sample_num) else: _perm_sample_ls = _all_perm for _perm in _perm_sample_ls: assert len(_perm) == len(_source) assert _perm != _source _rep_pairs_copy = copy.deepcopy(rep_pairs) _same_n = 0 for _src, _tgt in zip(_source, _perm): _rep_pairs_copy[_src] = rep_pairs[_tgt] if _tgt in rep_pairs else str_map[_tgt] # if _rep_pairs_copy[_src].lower() == str_map[_src].lower() or _rep_pairs_copy[_src].lower() in ent_name_set[_src]: # FIXME: This is used when ``random_ht == True`` if _src in str_map and _src in ent_name_set and ( _rep_pairs_copy[_src].lower() == str_map[_src].lower() or _rep_pairs_copy[_src].lower() in ent_name_set[_src]): _same_n += 1 if _same_n == len(_source): continue neg_res.append(_replace_entities_w_str(candidate, _rep_pairs_copy)) return neg_res
Python
def mask_tokens( self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, torch.Tensor]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """ labels = inputs.clone() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = torch.full(labels.shape, self.mlm_probability) if special_tokens_mask is None: special_tokens_mask = [ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist() ] special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool) # Remove padding. special_tokens_mask = special_tokens_mask | (labels == self.tokenizer.pad_token_id) else: special_tokens_mask = special_tokens_mask.bool() probability_matrix.masked_fill_(special_tokens_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -1 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels
Python
def process_tweet(tweet):
    '''
    This function processes a tweet and removes extra words/characters
    that are not needed, like usernames, URLs, etc.
    '''
    # Convert to lower case
    tweet = tweet.lower()
    # Convert www.* or https?://* to URL
    tweet = re.sub(r'((www\.[^\s]+)|(https?://[^\s]+))', 'URL', tweet)
    # Convert @username to AT_USER
    tweet = re.sub(r'@[^\s]+', 'AT_USER', tweet)
    # Collapse additional white space
    tweet = re.sub(r'[\s]+', ' ', tweet)
    # Replace #word with word
    tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
    # Trim leading/trailing quotes
    tweet = tweet.strip('\'"')

    return tweet
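A quick usage example, assuming ``import re`` and the function above are in scope; the tweet text is made up:

# Example of the cleaning pipeline on a made-up tweet.
raw = "RT @guido: Loving the new #Python release!!   http://bit.ly/xyz"
print(process_tweet(raw))
# -> rt AT_USER loving the new python release!! URL
# (the ':' after the handle is absorbed because '@[^\s]+' is greedy over non-whitespace)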
Python
def replace_two_or_more(s):
    '''
    Look for two or more repetitions of a character and collapse them
    down to exactly two occurrences (e.g. "huuuuungry" -> "huungry").
    '''
    pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
    return pattern.sub(r"\1\1", s)
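For example (assuming ``import re`` and the function above are available):

print(replace_two_or_more("I'm sooooo huuuuungry!!!!"))
# -> I'm soo huungry!!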
Python
def gen_features(training_datafile):
    '''
    Function to generate the feature list from the training data. It is used
    only once and the features are saved in a file that can be loaded later.
    '''
    stop_words = get_stop_wordlist('app/data/stopwords.txt')
    tweet_items = get_filtered_training_data(training_datafile)

    all_words = []
    for (tweet, sentiment) in tweet_items:
        tweet = process_tweet(tweet)
        words_filtered = [e.lower() for e in tweet.split() if is_ascii(e)]

        for word in words_filtered:
            word = replace_two_or_more(word)
            word = word.strip('\'"?,.')
            val = re.search(r"^[a-zA-Z][a-zA-Z0-9]*[a-zA-Z]+[a-zA-Z0-9]*$", word)
            # Skip stop words, non-alphabetic tokens and very short words.
            if word in stop_words or val is None or len(word) < 3:
                continue
            all_words.append(word)

    # Rank words by frequency and write one feature per line.
    freq_dist = nltk.FreqDist(all_words)
    word_features = sorted(freq_dist.items(), key=lambda x: x[1], reverse=True)

    with open('app/data/feature_list.txt', 'w') as feature_list_file:
        for word, freq in word_features:
            feature_list_file.write('{}\n'.format(word))
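The generated file is consumed later when classifying; a hypothetical loader (not part of the original code) could read it back like this:

# Hypothetical companion helper showing how the saved feature list could be
# loaded back into memory for classification.
def load_feature_list(path='app/data/feature_list.txt'):
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]


# feature_list = load_feature_list()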
Python
def train_model(test_tweets_file):
    '''
    This function trains the SVM classifier using the training data and
    evaluates it on the pickled test tweets.
    '''
    training_datafile = 'app/data/training.csv'
    svm_classifier_dumpfile = 'app/data/svm_trained_model.pickle'
    sys.stdout.flush()
    sc = SVMClassifier(training_datafile, svm_classifier_dumpfile, training_required=1)

    # Get the test tweets from the pickle file.
    with open(test_tweets_file, 'rb') as tweets_file:
        tweets = pickle.load(tweets_file)

    print('Classifying')
    sys.stdout.flush()
    results = sc.classify(tweets)
    for key in results:
        for item in results[key]:
            print(results[key][item])

    print('Computing Accuracy')
    sc.accuracy()
    print('Done')
    sys.stdout.flush()
Python
def make_api_request(url, http_method="GET", post_body=None, http_headers=None): ''' Make API request to get tweets. ''' consumer = oauth2.Consumer(key=settings.CONSUMER_KEY, secret=settings.CONSUMER_SECRET) token = oauth2.Token(key=settings.ACCESS_TOKEN, secret=settings.ACCESS_TOKEN_SECRET) client = oauth2.Client(consumer, token) resp, content = client.request( url, method=http_method, body=post_body or '', headers=http_headers ) return content
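A hedged usage example: the v1.1 search endpoint URL is an assumption (Twitter has been retiring these endpoints), and ``settings`` must already hold the four credentials used above:

# Hypothetical usage: fetch recent tweets for a query and decode the JSON body.
import json

url = 'https://api.twitter.com/1.1/search/tweets.json?q=python&count=10'
content = make_api_request(url, http_method='GET')
tweets = json.loads(content)
# for status in tweets.get('statuses', []):
#     print(status['text'])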