Python
def posts(self, user):
    """ Return all user's posts. """
    key = make_key('posts', user.pk)
    posts = cache.get(key)
    if posts is None:
        posts = self.filter(author=user)
        cache.set(key, posts)
    return posts
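`make_key` and `cache_bust` are project helpers that are not shown here; a minimal sketch of the former, assuming it simply joins a prefix and a primary key (the exact key format is hypothetical):

Python
def make_key(prefix, pk):
    # Hypothetical helper: builds a namespaced cache key such as "posts:42".
    return '{0}:{1}'.format(prefix, pk)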
Python
def post_count(self, user):
    """ Return a count of one user's posts. """
    count = self.posts(user).count()
    return count
Python
def add_to_timeline(self, instance, user):
    """ Add instance to user's timeline when saving. """
    ctype = ContentType.objects.get_for_model(instance)
    timeline, created = self.get_or_create(content_type=ctype,
                                           object_id=instance.pk,
                                           user=user,
                                           date=instance.created)
    if created:
        cache_bust([('posts_timeline', user.pk)])
        return timeline
    return False
Python
def remove_from_timeline(self, instance, user):
    """ Remove instance from user's timeline when deleting. """
    ctype = ContentType.objects.get_for_model(instance)
    try:
        timeline = self.get(content_type=ctype, object_id=instance.pk, user=user)
        timeline.delete()
        return True
    except self.model.DoesNotExist:
        raise ObjectDoesNotExist('Failure trying to delete {instance}'.format(instance=instance.title))
Python
def _is_installed_app(self, app):
    """ Check if a concrete web application is installed in settings.INSTALLED_APPS. """
    installed_apps = getattr(settings, 'INSTALLED_APPS', None)
    if installed_apps is None:
        raise AttributeError('Error getting INSTALLED_APPS attribute.')
    if app in installed_apps:
        return True
    else:
        installed_apps_short = [apps_long.split('.')[-1] for apps_long in installed_apps]
        if app in installed_apps_short:
            return True
    return False
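The check accepts either the full dotted path or its final component. A standalone sketch of the same matching logic, with hypothetical app names:

Python
INSTALLED_APPS = ['django.contrib.auth', 'blog']

def is_installed(app, installed_apps=INSTALLED_APPS):
    # Match either the full dotted path or the short (last-component) name.
    short_names = [name.split('.')[-1] for name in installed_apps]
    return app in installed_apps or app in short_names

print(is_installed('django.contrib.auth'))  # True: exact match
print(is_installed('auth'))                 # True: matches the short name
print(is_installed('admin'))                # False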
Python
def _get_opened_tabs_from_json_recovery(
    recovery_json: typ.Dict[str, typ.Any]
) -> typ.List[typ.Dict[str, typ.Any]]:
    """Extract the currently opened tabs from the recovery.json data.

    :param recovery_json: A dictionary obtained from the json.load method of
        Python's json standard module. The file represented is recovery.jsonlz4
        after uncompression.
    :return: a list containing all the opened tabs of all the opened Firefox
        instances.
    """
    opened_tabs = list()
    for window in recovery_json["windows"]:
        for tab in window["tabs"]:
            current_index = int(tab["index"]) - 1
            current_tab = tab["entries"][current_index]
            opened_tabs.append(current_tab)
    return opened_tabs
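Producing that dictionary means undoing Firefox's mozlz4 container first. A hedged sketch, assuming the third-party `lz4` package (`pip install lz4`) and the usual 8-byte `mozLz40\0` magic prefix:

Python
import json
import lz4.block  # third-party: pip install lz4

def load_recovery_json(path):
    # recovery.jsonlz4 starts with the 8-byte "mozLz40\0" magic header,
    # followed by an LZ4 block with the uncompressed size prepended.
    with open(path, "rb") as handle:
        data = handle.read()
    return json.loads(lz4.block.decompress(data[8:]))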
Python
def __signal_handler(self, signum, frame):
    """On SIGINT (e.g. CTRL+C) stop the crawler.

    Args:
        signum (int): The signal number.
        frame (obj): The current stack frame.

    """
    self.__crawler_stop()
Python
def start_with(self, request):
    """Start the crawler using the given request.

    Args:
        request (:class:`nyawc.http.Request`): The startpoint for the crawler.

    """
    HTTPRequestHelper.patch_with_options(request, self.__options)
    self.queue.add_request(request)

    self.__crawler_start()
Python
def __spawn_new_requests(self):
    """Spawn new requests until the max threads option value is reached.

    Note:
        If no new requests were spawned and there are no requests in progress
        the crawler will stop crawling.

    """
    self.__should_spawn_new_requests = False
    in_progress_count = len(self.queue.get_all(QueueItem.STATUS_IN_PROGRESS))

    while in_progress_count < self.__options.performance.max_threads:
        if self.__spawn_new_request():
            in_progress_count += 1
        else:
            break

    if in_progress_count == 0:
        self.__crawler_stop()
Python
def __wait_for_current_threads(self):
    """Wait until all the current threads are finished."""
    for thread in list(self.__threads.values()):
        thread.join()
Python
def __crawler_finish(self):
    """Called when the crawler is finished because there are no queued requests left or it was stopped."""
    try:
        self.__options.callbacks.crawler_after_finish(self.queue)
    except Exception as e:
        print(e)
        print(traceback.format_exc())
Python
def __request_finish(self, queue_item, new_requests, request_failed=False):
    """Called when the crawler finished the given queue item.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.
        new_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request.
        request_failed (bool): True if the request failed (i.e. it needs to be moved to errored).

    """
    if self.__stopping:
        return

    del self.__threads[queue_item.get_hash()]

    if request_failed:
        new_queue_items = []
        self.queue.move(queue_item, QueueItem.STATUS_ERRORED)
    else:
        self.routing.increase_route_count(queue_item.request)
        new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests)
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)

    try:
        action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items)
    except Exception as e:
        action = None
        print(e)
        print(traceback.format_exc())

    queue_item.decompose()

    if action == CrawlerActions.DO_STOP_CRAWLING:
        self.__should_stop = True

    if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
        self.__should_spawn_new_requests = True
Python
def __add_scraped_requests_to_queue(self, queue_item, scraped_requests):
    """Convert the scraped requests to queue items, return them and also add them to the queue.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished.
        scraped_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request.

    Returns:
        list(:class:`nyawc.QueueItem`): The new queue items.

    """
    new_queue_items = []

    for scraped_request in scraped_requests:
        HTTPRequestHelper.patch_with_options(scraped_request, self.__options, queue_item)

        if not HTTPRequestHelper.complies_with_scope(queue_item, scraped_request, self.__options.scope):
            continue

        if self.queue.has_request(scraped_request):
            continue

        scraped_request.depth = queue_item.request.depth + 1
        if self.__options.scope.max_depth is not None:
            if scraped_request.depth > self.__options.scope.max_depth:
                continue

        new_queue_item = self.queue.add_request(scraped_request)
        new_queue_items.append(new_queue_item)

    return new_queue_items
Python
def __make_request(self, url, method, data, auth, cookies, headers, proxies, timeout, verify):
    """Execute a request with the given data.

    Args:
        url (str): The URL to call.
        method (str): The method (e.g. `get` or `post`).
        data (str): The data to call the URL with.
        auth (obj): The authentication class.
        cookies (obj): The cookie dict.
        headers (obj): The header dict.
        proxies (obj): The proxies dict.
        timeout (int): The request timeout in seconds.
        verify (mixed): SSL verification.

    Returns:
        obj: The response object.

    """
    request_by_method = getattr(requests, method)

    return request_by_method(
        url=url,
        data=data,
        auth=auth,
        cookies=cookies,
        headers=headers,
        proxies=proxies,
        timeout=timeout,
        verify=verify,
        allow_redirects=True,
        stream=False
    )
Python
def __get_all_scrapers(self):
    """Find all available scraper references.

    Returns:
        list(obj): The scraper references.

    """
    modules_strings = self.__get_all_scrapers_modules()
    modules = []

    for module_string in modules_strings:
        module = importlib.import_module("nyawc.scrapers." + module_string)
        modules.append(getattr(module, module_string))

    return modules
Python
def __get_all_scrapers_modules(self):
    """Find all available scraper modules.

    Returns:
        list(obj): The scraper modules.

    """
    modules = []

    file = os.path.realpath(__file__)
    folder = os.path.dirname(file)

    for filename in os.listdir(folder + "/../scrapers"):
        if filename.endswith("Scraper.py") and not filename.startswith("Base"):
            modules.append(filename[:-3])

    return modules
Python
def __content_type_matches(self, content_type, available_content_types):
    """Check if the given content type matches one of the available content types.

    Args:
        content_type (str): The given content type.
        available_content_types list(str): All the available content types.

    Returns:
        bool: True if a match was found, False otherwise.

    """
    # Decide based on the content type.
    if content_type is None:
        return False

    if content_type in available_content_types:
        return True

    for available_content_type in available_content_types:
        if available_content_type in content_type:
            return True

    return False
Python
def patch_with_options(request, options, parent_queue_item=None):
    """Patch the given request with the given options (e.g. user agent).

    Args:
        request (:class:`nyawc.http.Request`): The request to patch.
        options (:class:`nyawc.Options`): The options to patch the request with.
        parent_queue_item (:class:`nyawc.QueueItem`): The parent queue item object (request/response pair) if it exists.

    """
    request.auth = copy.deepcopy(options.identity.auth)
    request.cookies = copy.deepcopy(options.identity.cookies)
    request.headers = copy.deepcopy(options.identity.headers)
    request.proxies = copy.deepcopy(options.identity.proxies)
    request.timeout = copy.copy(options.performance.request_timeout)

    if parent_queue_item is not None:
        for cookie in parent_queue_item.request.cookies:
            request.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)

        for cookie in parent_queue_item.response.cookies:
            request.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)

    if options.misc.verify_ssl_certificates and options.misc.trusted_certificates:
        request.verify = options.misc.trusted_certificates
    else:
        request.verify = options.misc.verify_ssl_certificates
Python
def complies_with_scope(queue_item, new_request, scope):
    """Check if the new request complies with the crawling scope.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.
        new_request (:class:`nyawc.http.Request`): The request to check.
        scope (:class:`nyawc.Options.OptionsScope`): The scope to check.

    Returns:
        bool: True if it complies, False otherwise.

    """
    if not URLHelper.is_parsable(queue_item.request.url):
        return False

    if not URLHelper.is_parsable(new_request.url):
        return False

    if scope.request_methods:
        if queue_item.request.method not in scope.request_methods:
            return False

    if scope.protocol_must_match:
        if URLHelper.get_protocol(queue_item.request.url) != URLHelper.get_protocol(new_request.url):
            return False

    if scope.subdomain_must_match:
        current_subdomain = URLHelper.get_subdomain(queue_item.request.url)
        new_subdomain = URLHelper.get_subdomain(new_request.url)

        www_matches = False

        if current_subdomain == "www" and new_subdomain == "":
            www_matches = True

        if new_subdomain == "www" and current_subdomain == "":
            www_matches = True

        if not www_matches and current_subdomain != new_subdomain:
            return False

    if scope.hostname_must_match:
        if URLHelper.get_hostname(queue_item.request.url) != URLHelper.get_hostname(new_request.url):
            return False

    if scope.tld_must_match:
        if URLHelper.get_tld(queue_item.request.url) != URLHelper.get_tld(new_request.url):
            return False

    return True
Python
def decompose(self):
    """Decompose this queue item (set cached variables to None) to free up memory.

    Note:
        When setting cached variables to None, memory will be released after
        the garbage collector runs.

    """
    self.__response_soup = None

    self.decomposed = True
Python
def run(self):
    """Executes the HTTP call.

    Note:
        If both this and the parent handler raised an error, the queue item status
        will be set to errored instead of finished. This is to prevent e.g. 404 recursion.

    """
    try:
        self.__options.callbacks.request_in_thread_before_start(self.__queue_item)
    except Exception as e:
        print(e)

    new_requests = []
    failed = False

    try:
        handler = Handler(self.__options, self.__queue_item)
        new_requests = handler.get_new_requests()  # Parse the page to find new requests.

        try:
            self.__queue_item.response.raise_for_status()
        except Exception:
            if self.__queue_item.request.parent_raised_error:
                failed = True
            else:
                for new_request in new_requests:
                    new_request.parent_raised_error = True
    except Exception as e:
        failed = True
        error_message = "Setting status of '{}' to '{}' because of an HTTP error.".format(
            self.__queue_item.request.url,
            QueueItem.STATUS_ERRORED
        )
        DebugHelper.output(self.__options, error_message)
        DebugHelper.output(self.__options, e)

        try:
            self.__options.callbacks.request_on_error(self.__queue_item, str(e))
        except Exception as e:
            print(e)

    for new_request in new_requests:
        new_request.parent_url = self.__queue_item.request.url

    try:
        self.__options.callbacks.request_in_thread_after_finish(self.__queue_item)
    except Exception as e:
        print(e)

    with self.__callback_lock:
        self.__callback(self.__queue_item, new_requests, failed)
Python
def increase_route_count(self, crawled_request):
    """Increase the count that determines how many times a URL of a certain route has been crawled.

    Args:
        crawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route.

    """
    for route in self.__routing_options.routes:
        if re.compile(route).match(crawled_request.url):
            count_key = str(route) + crawled_request.method

            if count_key in self.__routing_count:
                self.__routing_count[count_key] += 1
            else:
                self.__routing_count[count_key] = 1

            break
Python
def is_treshold_reached(self, scraped_request):
    """Check if requests similar to the given request have already been crawled X times,
    where X is the minimum threshold amount from the options.

    Args:
        scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum threshold.

    Returns:
        bool: True if the threshold was reached, False otherwise.

    """
    for route in self.__routing_options.routes:
        if re.compile(route).match(scraped_request.url):
            count_key = str(route) + scraped_request.method

            if count_key in self.__routing_count:
                return self.__routing_count[count_key] >= self.__routing_options.minimum_threshold

    return False
Python
import functools

def exception(logger):
    """
    A decorator that wraps the passed-in function and logs exceptions should one occur.

    @param logger: The logging object
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                # log the exception (logger.exception also records the traceback)
                err = "There was an exception in "
                err += func.__name__
                logger.exception(err)
                # re-raise the exception
                # raise
        return wrapper
    return decorator
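A hypothetical usage sketch with a standard-library logger; because the re-raise is left commented out, a decorated function returns None after a failure:

Python
import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)

@exception(logger)
def divide(a, b):
    return a / b

result = divide(1, 0)  # logs the ZeroDivisionError with its traceback
print(result)          # None: the exception was swallowed, not re-raised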
Python
def add_request(self, request):
    """Add a request to the queue.

    Args:
        request (:class:`nyawc.http.Request`): The request to add.

    Returns:
        :class:`nyawc.QueueItem`: The created queue item.

    """
    queue_item = QueueItem(request, Response(request.url))
    self.add(queue_item)
    return queue_item
Python
def has_request(self, request):
    """Check if the given request already exists in the queue.

    Args:
        request (:class:`nyawc.http.Request`): The request to check.

    Returns:
        bool: True if it already exists, False otherwise.

    """
    queue_item = QueueItem(request, Response(request.url))
    key = queue_item.get_hash()

    for status in QueueItem.STATUSES:
        if key in self.__get_var("items_" + status):
            return True

    return False
Python
def add(self, queue_item):
    """Add a request/response pair to the queue.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The queue item to add.

    """
    # The hash is built by concatenating the request method, protocol, host/path
    # and the ordered request parameters, e.g.
    # 'gethttpatmos.sysueducn/article/2005OrderedDict([(\'\', \'\')])'.
    hash_key = queue_item.get_hash()

    items = self.__get_var("items_" + queue_item.status)

    if hash_key in items:
        return

    items[hash_key] = queue_item
    self.count_total += 1
Python
def move(self, queue_item, status):
    """Move a request/response pair to another status.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The queue item to move.
        status (str): The new status of the queue item.

    """
    items = self.__get_var("items_" + queue_item.status)

    del items[queue_item.get_hash()]
    self.count_total -= 1

    queue_item.status = status
    self.add(queue_item)
Python
def move_bulk(self, from_statuses, to_status):
    """Move a bulk of request/response pairs to another status.

    Args:
        from_statuses list(str): The statuses to move from.
        to_status (str): The status to move to.

    """
    for status in from_statuses:
        from_status_items = self.__get_var("items_" + status)
        self.__set_var("items_" + status, OrderedDict())

        to_status_items = self.__get_var("items_" + to_status)
        to_status_items.update(from_status_items)
Python
def __set_var(self, name, value):
    """Set an instance/class var by name.

    Args:
        name (str): The name of the variable.
        value (obj): Its new value.

    """
    setattr(self, name, value)
Python
def __get_var(self, name):
    """Get an instance/class var by name.

    Args:
        name (str): The name of the variable.

    Returns:
        obj: Its value.

    """
    return getattr(self, name)
Python
def make_absolute(base, relative):
    """Make the given (relative) URL absolute.

    Args:
        base (str): The absolute URL the relative URL was found on.
        relative (str): The (possibly relative) URL to make absolute.

    Returns:
        str: The absolute URL.

    """
    # Python 3.4 and lower do not remove folder traversal strings.
    # This was fixed in 3.5 (https://docs.python.org/3/whatsnew/3.5.html#urllib)
    while relative.startswith('/../') or relative.startswith('../'):
        relative = relative[3:]

    base_parsed = urlparse(base)
    new_path = base_parsed.path.rsplit('/', 1)[0]
    base_parsed = base_parsed._replace(path=new_path)
    base = base_parsed.geturl()

    return urljoin(base, relative)
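A hypothetical usage sketch (the function above assumes `from urllib.parse import urlparse, urljoin`):

Python
# Excess "../" segments are clamped instead of escaping the site root.
print(make_absolute("http://example.com/", "../../x"))              # http://example.com/x
# Already-absolute URLs pass straight through urljoin.
print(make_absolute("http://example.com/a", "http://other.org/x"))  # http://other.org/x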
Python
def append_with_data(url, data):
    """Append the given URL with the given data OrderedDict.

    Args:
        url (str): The URL to append.
        data (obj): The key value OrderedDict to append to the URL.

    Returns:
        str: The new URL.

    """
    if data is None:
        return url

    url_parts = list(urlparse(url))

    query = OrderedDict(parse_qsl(url_parts[4], keep_blank_values=True))
    query.update(data)

    url_parts[4] = URLHelper.query_dict_to_string(query)

    return urlunparse(url_parts)
Python
def is_mailto(url):
    """Check if the given URL is a mailto URL.

    Args:
        url (str): The URL to check.

    Returns:
        bool: True if mailto, False otherwise.

    """
    return url.startswith("mailto:")
Python
def is_parsable(url):
    """Check if the given URL is parsable (make sure it's a valid URL). If it is parsable, also cache it.

    Args:
        url (str): The URL to check.

    Returns:
        bool: True if parsable, False otherwise.

    """
    try:
        parsed = urlparse(url)
        URLHelper.__cache[url] = parsed
        return True
    except Exception:
        return False
Python
def remove_hash(url):
    """Remove the #hash from the given URL.

    Args:
        url (str): The URL to remove the hash from.

    Returns:
        str: The URL without the hash.

    """
    return url.split("#")[0]
Python
def query_dict_to_string(query):
    """Convert an OrderedDict to a query string.

    Args:
        query (obj): The key value object with query params.

    Returns:
        str: The query string.

    Note:
        This method does the same as urllib.parse.urlencode except
        that it doesn't actually encode the values.

    """
    query_params = []

    for key, value in query.items():
        query_params.append(key + "=" + value)

    return "&".join(query_params)
Python
def query_string_to_dict(query):
    """Convert a string to a query dict.

    Args:
        query (str): The query string.

    Returns:
        obj: The key value object with query params.

    Note:
        This method does the same as urllib.parse.parse_qsl except
        that it doesn't actually decode the values.

    """
    query_params = {}

    for key_value in query.split("&"):
        key_value_pair = key_value.split("=", 1)

        key = key_value_pair[0] if len(key_value_pair) >= 1 else ""
        value = key_value_pair[1] if len(key_value_pair) == 2 else ""

        query_params[key] = value

    return query_params
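A round-trip sketch of the two helpers above; note that, unlike `urlencode`/`parse_qsl`, nothing is percent-encoded or decoded:

Python
from collections import OrderedDict

query = OrderedDict([("page", "2"), ("sort", "date")])
qs = query_dict_to_string(query)  # "page=2&sort=date"
assert query_string_to_dict(qs) == {"page": "2", "sort": "date"}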
Python
def load(self, path):
    """Loads the finite automaton from the specified path."""
    with open(path) as file:
        lines = file.readlines()
        for line in lines:
            pass
Python
def train_adversarial(model, train_dataset, epochs, layers, target_attack=False,
                      show_perturbation=False, use_mask=False,
                      save_adversarials_to_logs=False):
    """Train the model.

    train_dataset: Training Dataset object.
    epochs: Number of training epochs. Note that previous training epochs are
        considered to be done already, so this actually determines the epochs
        to train in total rather than in this particular call.
    layers: Allows selecting which layers to train. It can be:
        - A regular expression to match layer names to train
        - One of these predefined values:
          heads: The RPN, classifier and mask heads of the network
          all: All the layers
          3+: Train Resnet stage 3 and up
          4+: Train Resnet stage 4 and up
          5+: Train Resnet stage 5 and up
    """
    # Pre-defined layer regular expressions
    layer_regex = {
        # all layers but the backbone
        "heads": r"(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        # From a specific Resnet stage and up
        "3+": r"(fpn.C3.*)|(fpn.C4.*)|(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        "4+": r"(fpn.C4.*)|(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        "5+": r"(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        # All layers
        "all": ".*",
    }
    if layers in layer_regex.keys():
        layers = layer_regex[layers]

    # Data generators
    train_set = Dataset(train_dataset, model.config, augment=False)
    train_generator = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=False, num_workers=4)

    model.set_trainable(layers)

    for epoch in range(model.epoch + 1, epochs + 1):
        # Training
        train_adversarial_batch(model, train_generator,
                                target_attack=target_attack,
                                show_perturbation=show_perturbation,
                                use_mask=use_mask,
                                save_adversarials_to_logs=save_adversarials_to_logs)
Python
def mold_image_tensor(images, config):
    """Takes RGB images with 0-255 values, subtracts the mean pixel
    and converts the result to float. Expects image colors in RGB order.
    """
    return images - torch.from_numpy(config.MEAN_PIXEL).float().cuda().unsqueeze(1).unsqueeze(2).unsqueeze(0).expand_as(images)
Python
def crossover(parents, fitness, population):
    """
    Crossover parents to get the next generation. Parents are coupled and the
    features are sampled according to the parents' fitness scores.

    Args:
        parents (1D array): indexes of sampled parents, [2 * (population_size - 1)]
        fitness (1D array): population fitness scores, [population_size]
        population (4D array): current generation features, [population_size x n_channels x h x w]

    Returns:
        children (4D array): next generation features, [population_size - 1 x n_features]
    """
    _, nchannels, h, w = population.shape
    fitness_pairs = fitness[parents.long()].view(-1, 2)
    prob = fitness_pairs[:, 0] / fitness_pairs.sum(1)
    parental_bernoulli = td.Bernoulli(prob)
    inherit_mask = parental_bernoulli.sample_n(nchannels * h * w)  # [N-1, nchannels * h * w]
    inherit_mask = inherit_mask.view(-1, nchannels, h, w)
    parent_features = population[parents.long()]
    children = torch.cuda.FloatTensor(inherit_mask.shape)
    children = where(inherit_mask, parent_features[::2], parent_features[1::2])
    return children
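`where` is a helper that is not shown here; a minimal sketch, assuming it selects element-wise between two tensors from a 0/1 mask (modern code would use `torch.where` with a boolean condition):

Python
import torch

def where(cond, x, y):
    # Hypothetical helper: picks x where cond == 1, y elsewhere.
    cond = cond.float()
    return cond * x + (1 - cond) * y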
Python
def attack(x, target, delta, alpha, p, N, G, net):
    '''
    Attacks the black box model in `net` by generating and evolving a population
    of attacking examples.

    Args:
        x (3D array): Original example of size [nchannels, h, w]
        target (1D array): target descriptor
        alpha (float): the parameter controlling mutation amount
        delta (float): the parameter controlling mutation amount
        p (float): the parameter for the Bernoulli distribution used in mutation
        N (integer): the size of the population
        G (integer): the number of generations to evolve through
        net: black box model to attack

    Returns:
        Pcurrent: evolved population of adversarial examples of the original `x`
            targeted with the `target` descriptor to attack the black box model with.
    '''
    # mse = torch.nn.MSELoss(reduce=False).cuda()
    bernoulli = td.Bernoulli(p)
    softmax = torch.nn.Softmax(0).cuda()

    # generate starting population
    nchannels, h, w = x.shape
    mutation = get_mutation([N, nchannels, h, w], alpha, delta, bernoulli)

    # init current population
    Pcurrent = x[None, :, :, :].expand(N, -1, -1, -1) + mutation
    Pnext = torch.zeros_like(Pcurrent)
    # init previous population with the original example
    Pprev = x[None, :, :, :].expand(N, -1, -1, -1)

    # compute constraints to ensure a permissible distance from the original example
    lo = x.min() - alpha[0] * delta[0]
    hi = x.max() + alpha[0] * delta[0]

    # init log
    log = []

    # start evolution
    t = trange(G)
    for g in t:
        # measure fitness with MSE between descriptors
        fitness = get_fitness(Pcurrent, target, net)  # [N]

        # Log fitness
        log.append(fitness)

        # check SSIM
        ssimm = np.zeros(N)
        for i in range(N):
            ssimm[i] = ssim(x.squeeze().permute(1, 2, 0).cpu().numpy(),
                            Pcurrent[i].permute(1, 2, 0).cpu().numpy(),
                            multichannel=True)  # [N]
        # survivors = ssimm >= 0.95  # [N]
        survivors = ssimm >= 0.40  # [N]

        # Update description
        t.set_description("Avg fitness: %f; avg ssimm %f" % (fitness.mean(), ssimm.mean()))
        t.refresh()

        # "Save" first output in console to compare
        if g == 1:
            print("\n")

        if survivors.sum() == 0:
            print('All candidates died.')
            return Pprev, log

        # End
        if g == G - 1:
            return Pcurrent, log

        # choose the best-fit candidate among the population
        _, best = torch.min(fitness, 0)  # get idx of the best-fitted candidate
        # ensure the best candidate gets a place in the next population
        Pnext[0] = Pcurrent[best]

        # generate next population
        # TODO: non-survivors are ignored atm
        # probs = softmax(Variable(torch.cuda.FloatTensor(survivors)) * Variable(fitness)).data
        probs = softmax(Variable(-fitness)).data
        cat = td.Categorical(probs[None, :].expand(2 * (N - 1), -1))
        parents = cat.sample()  # sample 2 parents per child, total number of children is N-1
        children = crossover(parents, fitness, Pcurrent)  # [(N-1) x nchannels x h x w]
        mutation = get_mutation([N - 1, nchannels, h, w], alpha, delta, bernoulli)
        children = children + mutation
        Pnext[1:] = children

        Pprev = Pcurrent    # update previous generation
        Pcurrent = Pnext    # update current generation
        # clip to ensure the distance constraints
        Pcurrent = torch.clamp(Pcurrent, lo, hi)

    return Pcurrent, log
Python
def train_adversarial(model, train_dataset, epochs, layers, target_attack=False):
    """Train the model.

    train_dataset: Training Dataset object.
    epochs: Number of training epochs. Note that previous training epochs are
        considered to be done already, so this actually determines the epochs
        to train in total rather than in this particular call.
    layers: Allows selecting which layers to train. It can be:
        - A regular expression to match layer names to train
        - One of these predefined values:
          heads: The RPN, classifier and mask heads of the network
          all: All the layers
          3+: Train Resnet stage 3 and up
          4+: Train Resnet stage 4 and up
          5+: Train Resnet stage 5 and up
    """
    # Pre-defined layer regular expressions
    layer_regex = {
        # all layers but the backbone
        "heads": r"(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        # From a specific Resnet stage and up
        "3+": r"(fpn.C3.*)|(fpn.C4.*)|(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        "4+": r"(fpn.C4.*)|(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        "5+": r"(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        # All layers
        "all": ".*",
    }
    if layers in layer_regex.keys():
        layers = layer_regex[layers]

    # Data generators
    train_set = Dataset(train_dataset, model.config, augment=False)
    train_generator = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=False, num_workers=4)

    model.set_trainable(layers)

    for epoch in range(model.epoch + 1, epochs + 1):
        # Training
        train_adversarial_batch(model, train_generator, target_attack=target_attack)
Python
def attack_random_noise(self, img):
    """
    Creates random noise as an attack on img.
    :param img: input image
    :return: random noise
    """
    noise = torch.rand(img.size())
    noise = noise * 0.07
    return noise
Python
def attack_FGSM(self, img, target=-1):
    """
    Uses the Fast Gradient Sign Method (FGSM) to attack the image img by
    stepwise adding the sign of the image gradient. For a targeted attack
    the gradient gets subtracted to reach the desired class.
    :param img: input image
    :param target: target class. -1 for a non-targeted attack
    :return: perturbation
    """
    # Definitions
    steps = 10
    step_alpha = 0.001
    eps = 2 * 8 / 225.
    label = torch.zeros(1, 1)
    img = Variable(img)
    label = Variable(label)

    # Define the label variable. Target for a targeted attack.
    if target == -1:
        # Perform a forward pass to get the prediction of the original image
        output = self.model(img)
        label.data = output.max(1)[1].data
    else:
        label.data = torch.Tensor([target]).type(torch.LongTensor)

    # Clone so img does not get manipulated
    img_adv = img.clone()
    img_adv.requires_grad = True

    for step in range(steps):
        zero_gradients(img_adv)
        out = self.model(img_adv)

        # Calculate the loss, gradient and normed gradient based on the sign of the gradient
        _loss = self.loss(out, label)
        _loss.backward()
        normed_grad = step_alpha * torch.sign(img_adv.grad.data)

        # Add/subtract the perturbation
        if target == -1:
            step_adv = img_adv.data + normed_grad
        else:
            step_adv = img_adv.data - normed_grad

        # Postprocess the perturbation
        adv = step_adv - img.data
        adv = torch.clamp(adv, -eps, eps)  # Max eps range
        result = img.data + adv
        result = torch.clamp(result, 0.0, 1.0)  # Image range
        adv = result - img.data

        # Set the adversarial image as the new input
        img_adv.data = result

        print("Step: {0}, Loss: {1:.2f}, Top1: {2}".format(step, _loss.data[0],
                                                           self.labels[out.data.numpy().argmax()]))

    return adv
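For reference, each loop iteration performs the usual projected FGSM step, with `+` for the untargeted case (increase the loss on the current label) and `-` for the targeted case (decrease the loss on the target class):

LaTeX
x_{t+1} = \mathrm{clip}_{[0,1]}\big( x_t \pm \alpha \cdot \mathrm{sign}(\nabla_x L(x_t, y)) \big),
\qquad \lVert x_{t+1} - x_0 \rVert_\infty \le \varepsilon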
Python
def normalEquation(x, y):
    """
    Parameters: input variables (table), target vector
    Instructions: Complete the code to compute the closed-form solution to
    linear regression and save the result in theta.
    Return: coefficients
    """
    x_t = np.transpose(x)
    theta = np.linalg.inv(x_t.dot(x))
    theta = theta.dot(x_t)
    theta = theta.dot(y)
    return theta
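A quick sanity check of the closed form theta = (X^T X)^{-1} X^T y on toy data (self-contained apart from the function itself):

import numpy as np

x = np.c_[np.ones(5), np.arange(5)]  # design matrix with a bias column
y = 2 + 3 * np.arange(5)             # exact line y = 2 + 3x
theta = normalEquation(x, y)
print(theta)                          # approximately [2. 3.]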
Python
def gradient_descent(x, y, theta, i, r):
    """
    Parameters: input variable, target variable, theta, number of iterations,
    learning_rate
    Instructions: Complete the code to compute the iterative solution to
    linear regression; in each iteration, append the cost of the iteration
    to an (initially empty) list named cost_history and update theta.
    Return: theta, cost_history
    """
    n = float(len(y))
    cost_history = []
    theta1 = []
    theta2 = []
    x_t = np.transpose(x)
    for _ in range(i):  # run i gradient steps
        theta1.append(theta.item(0, 0))
        theta2.append(theta.item(1, 0))
        ytrain = x.dot(theta)
        ydiff = y - ytrain
        cost = (np.transpose(ydiff).dot(ydiff)) / (2 * n)
        cost_history.append(float(cost))
        thetagrad = x_t.dot(ydiff) / n
        theta = theta + r * thetagrad
    return theta, cost_history, theta1, theta2
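The same toy problem as above, solved iteratively; column-vector shapes are assumed for y and theta so that theta.item(0, 0) is valid:

import numpy as np

x = np.c_[np.ones(50), np.linspace(0, 1, 50)]
y = (2 + 3 * np.linspace(0, 1, 50)).reshape(-1, 1)
theta0 = np.zeros((2, 1))
theta, cost_history, t1, t2 = gradient_descent(x, y, theta0, 1000, 0.5)
print(theta.ravel())        # approaches [2. 3.]
print(cost_history[::250])  # cost decreases towards 0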
Python
def serialize_instance(self, instance):
    """ Serializes the given model instance as JSON and encodes it as base64. """
    json_instance = serialize("json", [instance], ensure_ascii=False)[1:-1]
    return base64.b64encode(json_instance.encode("utf-8")).decode("utf-8")
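A hedged sketch of the inverse operation (not part of the original snippet), using Django's serializers.deserialize; the list brackets stripped by the [1:-1] slice above must be restored first:

import base64
from django.core.serializers import deserialize

def deserialize_instance(encoded):
    json_instance = base64.b64decode(encoded).decode("utf-8")
    # restore the list wrapper that serialize_instance stripped
    return next(deserialize("json", "[" + json_instance + "]")).object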
Python
def lexical_specificity(T, t, f, k, lim: int = 400):
    """ calculate lexical specificity, originally implemented by Jose
    :param T: a size of reference corpus
    :param t: a size of sub-corpus (t <= T)
    :param f: a frequency of w in reference corpus
    :param k: a frequency of w in sub-corpus (k <= f)
    :param lim: threshold above which Stirling's approximation replaces the
        exact log-factorial
    :return: float score
    """

    def stirling(n):
        return n * log(n) - n + (0.5 * log(2 * 3.141592 * n))

    def log_fact(n):
        # exact log-factorial for small n, Stirling's approximation otherwise
        return stirling(n) if n > lim else log(factorial(int(n)))

    assert t <= T, "t exceeds T"
    assert k <= f, "k ({}) exceeds f ({})".format(k, f)
    arg1 = log_fact(f)
    arg2 = log_fact(T - f)
    arg3 = log_fact(t)
    arg4 = log_fact(T - t)
    arg5 = log_fact(T)
    arg6 = log_fact(k)
    arg7 = log_fact(f - k)
    arg8 = log_fact(t - k)
    arg9 = log_fact(T - f - t + k)
    # log-probability of the hypergeometric term
    prob = arg1 + arg2 + arg3 + arg4 - arg5 - arg6 - arg7 - arg8 - arg9
    first_prod = -log10(math.e) * prob
    if prob < log(0.1):
        # accumulate the hypergeometric tail by successive term ratios
        prob1 = 1.0
        prob = 1.0
        while prob1 != 0.0 and (prob / prob1) < 10000000 and k <= f:
            prob2 = (prob1 * (f - k) * (t - k)) / ((k + 1) * (T - f - t + k + 1))
            prob = prob + prob2
            prob1 = prob2
            k += 1
        score = first_prod - log10(prob)
        return score
    else:
        return 0
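A toy call, with counts chosen so the word is clearly over-represented in the sub-corpus (the math imports match what the function uses):

import math
from math import log, log10, factorial

# word seen 50 times in a 10,000-token reference corpus,
# 30 of those inside a 1,000-token sub-corpus
score = lexical_specificity(T=10000, t=1000, f=50, k=30)
print(score)  # large positive score indicates strong over-representation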
Python
def load(self, directory: str = CACHE_DIR):
    """ load the saved frequency dictionary used for lexical specificity """
    path_to_dict = "{}/lexical_specificity_frequency.json".format(directory)
    assert os.path.exists(path_to_dict), 'no dictionary found: {}'.format(path_to_dict)
    with open(path_to_dict, 'r') as f:
        self.freq = json.load(f)
    self.reference_corpus_size = sum(self.freq.values())
    logging.debug('loaded frequency dictionary from {}'.format(path_to_dict))
Python
def add(self, raw: str, stemmed: str, pos: str, offset: int):
    """ add a single token: integrate it into a phrase or keep it as part of one

    Parameter
    -------------------
    raw: token in raw form
    stemmed: stemmed token
    pos: part of speech
    offset: offset position
    """

    def add_tmp_list():
        # add to the tmp lists
        self.__tmp_phrase_raw.append(raw)
        self.__tmp_phrase_stemmed.append(stemmed)
        self.__tmp_phrase_pos.append(pos)
        self.__tmp_phrase_offset.append(offset)

    # a token with more symbols than alphanumeric characters is ignored
    if len(re.sub(r'\w', '', stemmed)) > len(re.sub(r'\W', '', stemmed)):
        pos = 'RANDOM'
    if pos == 'NOUN' or (pos == 'ADJ' and 'NOUN' not in self.__tmp_phrase_pos):
        add_tmp_list()
    else:
        if 'NOUN' in self.__tmp_phrase_pos:
            # finalize the list of tokens as a phrase
            self.__add_to_structure()
        else:
            # adjectives alone can't be regarded as a phrase
            self.__initialize_list()
Python
def __add_to_structure(self):
    """ add to the main dictionary and initialize the tmp lists """
    phrase_stemmed = self.__joiner.join(self.__tmp_phrase_stemmed)
    # phrases with too many words or too many characters are ignored
    if (self.__maximum_word_number and
            len(self.__tmp_phrase_stemmed) > self.__maximum_word_number) or \
            (self.__maximum_char_number and
             len(phrase_stemmed) > self.__maximum_char_number):
        self.__initialize_list()
        return
    if len(self.__tmp_phrase_offset) == 1:
        offset = [self.__tmp_phrase_offset[0], self.__tmp_phrase_offset[0] + 1]
    else:
        offset = [self.__tmp_phrase_offset[0], self.__tmp_phrase_offset[-1]]
    # key of the main dictionary
    if phrase_stemmed in self.__content.keys():
        self.__content[phrase_stemmed]['raw'] += [self.__joiner.join(self.__tmp_phrase_raw)]
        self.__content[phrase_stemmed]['offset'] += [offset]
        self.__content[phrase_stemmed]['count'] += 1
    else:
        self.__content[phrase_stemmed] = dict(
            stemmed=self.__joiner.join(self.__tmp_phrase_stemmed),
            pos=' '.join(self.__tmp_phrase_pos),
            raw=[self.__joiner.join(self.__tmp_phrase_raw)],
            offset=[offset],
            count=1)
    # initialize tmp lists
    self.__initialize_list()
    return
Python
def load(self, directory: str = CACHE_DIR):
    """ load the saved TFIDF model and dictionary instance used to train it """
    path_to_model = os.path.join(directory, 'tfidf_model')
    path_to_dict = os.path.join(directory, 'tfidf_dict')
    assert os.path.exists(path_to_model), 'no model found: {}'.format(path_to_model)
    assert os.path.exists(path_to_dict), 'no dict found: {}'.format(path_to_dict)
    logging.debug('loading model...')
    self.__model = gensim.models.TfidfModel.load(path_to_model)
    self.__dict = gensim.corpora.Dictionary.load_from_text(path_to_dict)
    self.__dict.id2token = dict([(v, k) for k, v in self.__dict.token2id.items()])
Python
def train(self, data: list, export_directory: str = None, normalize: bool = True):
    """ TFIDF training

    Parameter
    ----------------
    data: list of documents (list of string)
    export_directory: directory where the model and dictionary are saved
    normalize: whether to normalize the resulting TFIDF vectors
    """
    # get stemmed tokens
    stemmed_tokens = list(map(lambda x: self.phrase_constructor.tokenize_and_stem(x), data))
    # build TFIDF
    logging.debug("building corpus...")
    self.__dict = corpora.Dictionary(stemmed_tokens)
    self.__dict.id2token = dict([(v, k) for k, v in self.__dict.token2id.items()])
    corpus = [self.__dict.doc2bow(text) for text in stemmed_tokens]
    logging.debug("training model...")
    self.__model = gensim.models.TfidfModel(corpus=corpus, normalize=normalize)
    logging.debug("saving model and corpus at {}".format(export_directory))
    self.save(export_directory)
Python
def distribution_word(self, document: str):
    """ word distribution of a given document under the pre-trained TFIDF model

    Parameter
    ----------------
    document: document to get the word distribution for

    Return
    -----------
    dict((word_0, weight_0), ..., (word_n, weight_n))
    Note that `n` depends on the document: only stems present in both the
    document and the trained dictionary receive a weight.
    """
    assert self.is_trained, 'train the model before running inference'
    # get stemmed tokens
    stemmed_tokens = self.phrase_constructor.tokenize_and_stem(document)
    bow = self.__dict.doc2bow(stemmed_tokens)
    dist = dict((self.__dict.id2token[w_id], p) for w_id, p in self.__model[bow])
    return dist
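A hedged end-to-end sketch of the two methods above, assuming they sit on a class (called TFIDF here purely for illustration) whose phrase_constructor tokenizes and stems, and whose is_trained flag is set by train():

docs = [
    "machine learning models learn from data",
    "deep learning is a branch of machine learning",
    "corpora are collections of documents",
]
model = TFIDF()  # hypothetical class exposing train() and distribution_word()
model.train(docs, export_directory='./tfidf_cache')
dist = model.distribution_word("machine learning on text corpora")
print(sorted(dist.items(), key=lambda kv: -kv[1])[:3])  # top-weighted stems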
Python
def load(self, directory: str = CACHE_DIR):
    """ load saved lda model and dictionary instance used to train the model """
    path_to_model = os.path.join(directory, 'lda_model')
    path_to_dict = os.path.join(directory, 'lda_dict')
    assert os.path.exists(path_to_model), 'no model found: {}'.format(path_to_model)
    assert os.path.exists(path_to_dict), 'no dict found: {}'.format(path_to_dict)
    logging.debug('loading model...')
    self.__model = gensim.models.ldamodel.LdaModel.load(path_to_model)
    self.__dict = gensim.corpora.Dictionary.load_from_text(path_to_dict)
    self.__dict.id2token = dict([(v, k) for k, v in self.__dict.token2id.items()])
Python
def topic_clustering(self, stemmed_phrases: list):
    """ group given phrases into topics by HAC over their tokens

    Parameter
    --------------------
    stemmed_phrases: list
        list of stemmed keywords

    Return
    --------------------
    grouped_phrase: list
        grouped keywords
    """
    unique_token = set(chain(*[p.split() for p in stemmed_phrases]))
    token_to_id = dict([(t, i) for i, t in enumerate(unique_token)])
    # convert phrases to vectors spanned by the unique tokens
    matrix = np.zeros((len(stemmed_phrases), len(unique_token)))
    for n, p in enumerate(stemmed_phrases):
        indices = [token_to_id[_p] for _p in set(p.split())]
        matrix[n, indices] = 1
    # calculate distance
    distance = pdist(matrix, 'jaccard')
    # compute the clusters
    links = linkage(distance, method=self.__linkage_method)
    # get cluster id: len(clusters) == len(stemmed_phrases)
    clusters = fcluster(links, t=self.__clustering_threshold, criterion='distance')
    grouped_phrase = [np.array(stemmed_phrases)[clusters == c_id].tolist()
                      for c_id in set(clusters)]
    return grouped_phrase
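A self-contained sketch of the same clustering steps on toy phrases (scipy only, no class state; the 0.7 threshold and average linkage are example values standing in for the private attributes above):

import numpy as np
from itertools import chain
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, fcluster

phrases = ["neural network", "deep neural network", "random forest", "forest fire"]
tokens = sorted(set(chain(*[p.split() for p in phrases])))
token_to_id = {t: i for i, t in enumerate(tokens)}
matrix = np.zeros((len(phrases), len(tokens)))
for n, p in enumerate(phrases):
    matrix[n, [token_to_id[t] for t in set(p.split())]] = 1
clusters = fcluster(linkage(pdist(matrix, "jaccard"), method="average"),
                    t=0.7, criterion="distance")
print(clusters)  # phrases sharing tokens tend to land in the same cluster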
Python
def array_to_niis(data, mask):
    """ Converts a masked nii 4D array to a 4D niimg """
    mask_img = nib.load(mask)
    data_ = np.zeros(data.shape[:1] + mask_img.shape)
    # plain bool instead of the removed np.bool alias
    data_[:, mask_img.get_data().astype(bool)] = data
    data_ = np.transpose(data_, axes=(1, 2, 3, 0))
    return nib.Nifti1Image(data_, mask_img.get_affine())
Python
def array_to_nii(data, mask):
    """ Converts a masked nii 3D array to a 3D niimg """
    mask_img = nib.load(mask)
    data_ = np.zeros(mask_img.shape)
    # plain bool instead of the removed np.bool alias
    data_[mask_img.get_data().astype(bool)] = data
    return nib.Nifti1Image(data_, mask_img.get_affine())
Python
def _get_subjects_and_description(base_dir, prefix,
                                  exclusion_file='excluded_subjects.txt',
                                  description_csv='description_file.csv'):
    """ Returns the list of subjects and the phenotypic dataframe """
    # load files and get dirs
    BASE_DIR = _get_data_base_dir(base_dir)
    subject_paths = sorted(glob.glob(os.path.join(BASE_DIR, prefix)))
    fname = os.path.join(BASE_DIR, exclusion_file)
    if not os.path.isfile(fname):
        raise OSError('%s not found ...' % fname)
    excluded_subjects = []
    if os.stat(fname).st_size > 0:
        excluded_subjects = np.loadtxt(fname, dtype=bytes).astype(str)
    fname = os.path.join(BASE_DIR, description_csv)
    if not os.path.isfile(fname):
        raise OSError('%s not found ...' % fname)
    description = pd.read_csv(fname)
    # exclude bad QC subjects
    excluded_subjects = list(excluded_subjects)
    excluded_paths = np.array([os.path.join(BASE_DIR, x)
                               for x in excluded_subjects])
    subject_paths = np.setdiff1d(subject_paths, excluded_paths)
    # get subject_id
    subjects = [os.path.split(s)[-1] for s in subject_paths]
    return subjects, subject_paths, description
Python
def _rid_to_ptid(rid, roster):
    """Returns the patient id for a given rid """
    ptid = roster[roster.RID == rid]['PTID'].values
    if len(ptid) > 0:
        return ptid[0]
    else:
        return ''
Python
def _find_closest_exam_date(acq_date, exam_dates):
    """Returns the closest date and the index of the exam_date closest
    to acq_date"""
    diff = [abs(acq_date - e) for e in exam_dates]
    ind = np.argmin(diff)
    return exam_dates[ind], ind
Python
def _diff_visits(vis_1, vis_2):
    """Returns a numerical difference between two visits """
    # First, we convert visits (list() is needed: Python 3 maps are lazy)
    v = list(map(lambda x: 0 if x in ['bl', 'sc', 'uns1', 'scmri', 'nv', 'f']
                 else int(x[1:]), [vis_1, vis_2]))
    # Then, we subtract
    return np.absolute(v[0] - v[1])
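A quick example of the code-to-number conversion (reading codes like 'm12' as month counts, with the baseline-style codes mapped to 0):

print(_diff_visits('bl', 'm12'))   # 12: 'bl' maps to 0, 'm12' to 12
print(_diff_visits('v06', 'v48'))  # 42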
Python
def _find_closest_exam_code(viscode, exam_codes):
    """Returns the code and the index of the current viscode """
    ind = np.argwhere(exam_codes == viscode)
    if len(ind) > 0:
        ind = ind[0, 0]
    else:
        diff = [_diff_visits(viscode, e) for e in exam_codes]
        ind = np.argmin(diff)
    return viscode, ind
Python
def _get_vcodes(rid, exam_date, dx):
    """ Returns the visit codes of an exam_date of a subject """
    vcodes = dx[(dx.RID == rid) & (dx.EXAMDATE == exam_date)]['VISCODE'].values
    vcodes2 = \
        dx[(dx.RID == rid) & (dx.EXAMDATE == exam_date)]['VISCODE2'].values
    if not vcodes.any():
        vcodes = [np.nan]
    if not vcodes2.any():
        vcodes2 = [np.nan]
    return [vcodes[0], vcodes2[0]]
Python
def _get_dx(rid, dx, exam=None, viscode=None, return_code=False):
    """Returns all diagnoses for a given rid, depending on exam or viscode
    (mutually exclusive)
    """
    if exam is not None and viscode is not None:
        raise ValueError('Both exam and viscode are set !')
    if exam is not None:
        dates = dx[dx.RID == rid]['EXAMDATE'].values
        exam_dates = [date(int(d[:4]), int(d[5:7]), int(d[8:])) for d in dates]
    elif viscode is not None:
        if viscode[0] == 'v':
            # ADNI1
            exam_codes = dx[dx.RID == rid]['VISCODE'].values
        else:
            # ADNI GO/2
            exam_codes = dx[dx.RID == rid]['VISCODE2'].values
    # DXCHANGE
    change = dx[dx.RID == rid]['DXCHANGE'].values
    curren = dx[dx.RID == rid]['DXCURREN'].values
    # change and curren have the same length
    dxchange = [int(np.nanmax([change[k], curren[k]]))
                for k in range(len(curren))]
    if exam is not None and len(exam_dates) > 0:
        exam_date, ind = _find_closest_exam_date(exam, exam_dates)
        # TODO : return exam_date or exam_code ?
        if return_code:
            return exam_date
        else:
            return dxchange[ind]
    elif viscode is not None and len(exam_codes) > 0:
        exam_code, ind = _find_closest_exam_code(viscode, exam_codes)
        if return_code:
            return exam_code
        else:
            return dxchange[ind]
    else:
        return -4
Python
def _get_cdr(rid, cdr):
    """Returns the cdr for a given rid """
    c = cdr[cdr.RID == rid]['CDGLOBAL'].dropna().values
    c = c[c >= 0]
    if len(c) > 0:
        return np.percentile(c, 50, interpolation='nearest')
    else:
        return 0.
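The score getters below all follow the same pattern: the nearest-rank median of a subject's non-negative scores, with 0 as the fallback. A toy call on a small frame (using the numpy interpolation keyword the snippets target):

import numpy as np
import pandas as pd

cdr = pd.DataFrame({'RID': [1, 1, 1, 2], 'CDGLOBAL': [0.5, 1.0, 0.5, 2.0]})
print(_get_cdr(1, cdr))  # 0.5, the nearest-rank median of subject 1's scores
print(_get_cdr(3, cdr))  # 0.0 for an unknown rid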
Python
def _get_gdscale(rid, gdscale):
    """Returns the gdscale for a given rid """
    g = gdscale[gdscale.RID == rid]['GDTOTAL'].dropna().values
    g = g[g >= 0]
    if len(g) > 0:
        return np.percentile(g, 50, interpolation='nearest')
    else:
        return 0.
Python
def _get_npiq(rid, npiq):
    """Returns the npiq for a given rid """
    n = npiq[npiq.RID == rid]['NPISCORE'].dropna().values
    n = n[n >= 0]
    if len(n) > 0:
        return np.percentile(n, 50, interpolation='nearest')
    else:
        return 0.
Python
def _get_faq(rid, faq):
    """Returns the faq for a given rid """
    f = faq[faq.RID == rid]['FAQTOTAL'].dropna().values
    f = f[f >= 0]
    if len(f) > 0:
        return np.percentile(f, 50, interpolation='nearest')
    else:
        return 0.
Python
def _get_adas(rid, adas1, adas2, mode=11):
    """Returns the adas for a given rid
    mode : 11 or 13 """
    if mode == 11:
        key1, key2 = 'TOTAL11', 'TOTSCORE'
    elif mode == 13:
        key1, key2 = 'TOTALMOD', 'TOTAL13'
    else:
        raise ValueError('adas mode should be 11 or 13, you gave %u' % mode)
    a = adas1[adas1.RID == rid][key1].dropna().values
    if len(a) == 0:
        a = adas2[adas2.RID == rid][key2].dropna().values
    a = a[a >= 0]
    if len(a) > 0:
        return np.percentile(a, 50, interpolation='nearest')
    else:
        return 0.
Python
def _get_neurobat(rid, neurobat, mode=1):
    """Returns the neurobat for a given rid """
    if mode == 1:
        key = 'LDELTOTAL'
    elif mode == 2:
        key = 'LIMMTOTAL'
    else:
        raise ValueError('neurobat mode should be 1 or 2, you gave %u' % mode)
    n = neurobat[neurobat.RID == rid][key].dropna().values
    n = n[n >= 0]
    if len(n) > 0:
        return np.percentile(n, 50, interpolation='nearest')
    else:
        return 0.
Python
def _get_nss(rid, nss, mode=1):
    """Returns the nss for a given rid """
    if mode == 1:
        key = 'ADNI_MEM'
    elif mode == 2:
        key = 'ADNI_EF'
    else:
        raise ValueError('nss mode should be 1 or 2, you gave %u' % mode)
    n = nss[nss.RID == rid][key].dropna().values
    if len(n) > 0:
        return np.percentile(n, 50, interpolation='nearest')
    else:
        return 0.
Python
def _get_mmse(rid, mmse):
    """Returns the mmse for a given rid """
    m = mmse[mmse.RID == rid]['MMSCORE'].dropna().values
    if len(m) > 0:
        return np.median(m)
    else:
        return 0.
Python
def _get_dob(rid, demog):
    """Returns the date of birth of a given rid """
    yy = demog[demog.RID == rid]['PTDOBYY'].dropna().values
    mm = demog[demog.RID == rid]['PTDOBMM'].dropna().values
    if len(yy) > 0 and len(mm) > 0:
        return date(int(yy[0]), int(mm[0]), 1)
    else:
        return date(1900, 1, 1)
Python
def _get_gender(rid, demog):
    """Returns the gender of a given rid """
    gender = demog[demog.RID == rid]['PTGENDER'].dropna().values
    if len(gender) > 0:
        return int(gender[0])
    else:
        return -1
Python
def _get_group_indices(dx_group):
    """Returns the indices of each clinical group """
    dx_group = np.array(dx_group)
    idx = {}
    for g in ['AD', 'MCI', 'LMCI', 'EMCI', 'Normal',
              'MCI-Converter', 'Normal->MCI']:
        idx[g] = np.where(dx_group == g)[0]
    for g in ['EMCI', 'LMCI']:
        idx['MCI'] = np.hstack((idx['MCI'], idx[g]))
    idx['AD-rest'] = np.hstack((idx['MCI'], idx['Normal']))
    idx['MCI-rest'] = np.hstack((idx['AD'], idx['Normal']))
    idx['Normal-rest'] = np.hstack((idx['AD'], idx['MCI']))
    return idx
Python
def _get_classification_data(features, dx_group, groups, return_idx=False):
    """Set X and y for classification according to the chosen groups
    Returns : X, y, (idx)
    """
    # get group indices
    dx_idx = _get_group_indices(dx_group)
    # stack the desired indices
    idx_ = []
    for group in groups:
        idx_.extend(dx_idx[group])
    # extract corresponding features and classes (binary-only)
    X = features[idx_, ...]
    y = np.hstack(([1] * len(dx_idx[groups[0]]),
                   [-1] * len(dx_idx[groups[1]])))
    if return_idx:
        return X, y, idx_
    else:
        return X, y
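A toy call, using only the two helpers above (groups[0] is labeled +1, groups[1] is labeled -1):

import numpy as np

features = np.random.rand(6, 4)
dx_group = ['AD', 'AD', 'Normal', 'Normal', 'MCI', 'AD']
X, y = _get_classification_data(features, dx_group, groups=['AD', 'Normal'])
print(X.shape, y)  # (5, 4) [ 1  1  1 -1 -1]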
Python
def _get_group_data(features, dx_group, group):
    """Returns X for a given group """
    # get group indices
    dx_idx = _get_group_indices(dx_group)
    # extract and return the corresponding features
    return features[dx_idx[group], ...]
Python
def StratifiedSubjectShuffleSplit(dataset, groups, n_iter=100, test_size=.3,
                                  random_state=42):
    """ Stratified ShuffleSplit on subjects
    (train and test size may change depending on the number of acquisitions)"""
    idx = _get_group_indices(dataset.dx_group)
    groups_idx = np.hstack([idx[group] for group in groups])
    subjects = np.asarray(dataset.subjects)
    subjects = subjects[groups_idx]
    dx = np.asarray(dataset.dx_group)
    dx = dx[groups_idx]
    # extract unique subject ids and their dx
    (subjects_unique_values,
     subjects_unique_indices) = np.unique(subjects, return_index=True)
    y = dx[subjects_unique_indices]
    # generate folds stratified on dx (split() needs X and y)
    sss = StratifiedShuffleSplit(n_splits=n_iter, test_size=test_size,
                                 random_state=random_state)
    ssss = []
    for tr, ts in sss.split(np.zeros(len(y)), y):
        # get training subjects
        subjects_tr = subjects_unique_values[tr]
        # get testing subjects
        subjects_ts = subjects_unique_values[ts]
        # get all subject indices
        train = []
        test = []
        for subj in subjects_tr:
            train.extend(np.where(subjects == subj)[0])
        for subj in subjects_ts:
            test.extend(np.where(subjects == subj)[0])
        # append ssss
        ssss.append([train, test])
    return ssss
Python
def SubjectShuffleSplit(dataset, groups, n_iter=100, test_size=.3,
                        random_state=42):
    """ Specific ShuffleSplit (train on all subject images, but test only on
    one image of the remaining subjects)"""
    idx = _get_group_indices(dataset.dx_group)
    groups_idx = np.hstack((idx[groups[0]], idx[groups[1]]))
    subjects = np.asarray(dataset.subjects)
    subjects = subjects[groups_idx]
    subjects_unique = np.unique(subjects)
    n = len(subjects_unique)
    X = np.empty((n, n))
    ss = ShuffleSplit(n_splits=n_iter, test_size=test_size,
                      random_state=random_state)
    subj_ss = []
    for train, test in ss.split(X):
        train_get = np.array([], dtype=int)
        for subj in subjects_unique[train]:
            subj_ind = np.where(subjects == subj)
            train_get = np.concatenate((train_get, subj_ind[0]))
        test_get = []
        for subj in subjects_unique[test]:
            subj_ind = np.where(subjects == subj)
            test_get.append(subj_ind[0][0])
        test_get = np.asarray(test_get)
        subj_ss.append([train_get, test_get])
    return subj_ss
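A toy example of the subject-level splitting, with a minimal stand-in for the dataset Bunch (SimpleNamespace is used here only so the example is self-contained):

from types import SimpleNamespace

dataset = SimpleNamespace(
    subjects=['s1', 's1', 's2', 's3', 's3', 's4'],
    dx_group=['AD', 'AD', 'AD', 'Normal', 'Normal', 'Normal'])
splits = SubjectShuffleSplit(dataset, ['AD', 'Normal'], n_iter=3, test_size=.5)
train, test = splits[0]
print(train, test)  # all images of the train subjects; one image per test subject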
Python
def SubjectSplit(dataset, n_iter=100, test_size=.3, random_state=42):
    """ Version without dx (splits on subjects) """
    subjects = np.hstack(dataset.subjects)
    subjects_unique = np.unique(subjects)
    n = len(subjects_unique)
    ss = ShuffleSplit(n_splits=n_iter, test_size=test_size,
                      random_state=random_state)
    subj_ss = []
    # split() over a dummy array of the right length, as the n_splits-style
    # API requires
    for train, test in ss.split(np.zeros((n, 1))):
        train_get = np.array([], dtype=int)
        for subj in subjects_unique[train]:
            subj_ind = np.where(subjects == subj)
            train_get = np.concatenate((train_get, subj_ind[0]))
        test_get = np.array([], dtype=int)
        for subj in subjects_unique[test]:
            subj_ind = np.where(subjects == subj)
            test_get = np.concatenate((test_get, subj_ind[0]))
        subj_ss.append([train_get, test_get])
    return subj_ss
Python
def _train_and_score(clf, X, y, train, test):
    """ Fit a classifier clf on the train set and return the accuracy
    score on the test set"""
    clf.fit(X[train], y[train])
    return clf.score(X[test], y[test])
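A toy call with a linear SVM on random data (any scikit-learn estimator with fit/score works):

import numpy as np
from sklearn.svm import LinearSVC

X = np.random.rand(20, 5)
y = np.hstack(([1] * 10, [-1] * 10))
train, test = np.arange(15), np.arange(15, 20)
print(_train_and_score(LinearSVC(), X, y, train, test))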
Python
def _train_and_score_2(clf, X, y, train, test):
    """ Fit a classifier clf on the train set and return the accuracy
    score on the test set"""
    clf.fit(X[train], y[train])
    y_pred = clf.predict(X[test])
    return accuracy_score(y[test], y_pred)
Python
def _train_and_predict(clf, X, y, train, test):
    """ Fit a classifier clf on the train set and return predictions
    on the test set"""
    clf.fit(X[train], y[train])
    y_pred = clf.predict(X[test])
    return y_pred
Python
def _get_subjects_splits_reg(dataset, n_iter=100, test_size=.25,
                             random_state=42):
    """Returns X, ss
    Works only with longitudinal data """
    ss = SubjectSplit(dataset, n_iter=n_iter, test_size=test_size,
                      random_state=random_state)
    X = np.hstack(dataset.imgs)
    return X, ss
Python
def _get_y_from_dx(dx, target='AD'):
    """ dx is a vector of labels; the target label is mapped to +1 """
    dx = np.hstack(dx)
    y = - np.ones(dx.shape[0])
    y[dx == target] = 1
    return y
Python
def _binarize_dx(dx, target=('AD', 'MCI->AD')):
    """ dx is a vector of labels; every label in target is mapped to +1 """
    dx = np.hstack(dx)
    y = - np.ones(dx.shape[0])
    for t in target:
        y[dx == t] = 1
    return y
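A quick example of the binarization:

dx = ['AD', 'MCI', 'MCI->AD', 'Normal']
print(_binarize_dx(dx))  # [ 1. -1.  1. -1.]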
Python
def load_adni_longitudinal_rs_fmri_DARTEL():
    """ Returns longitudinal func processed with DARTEL """
    return load_adni_longitudinal_rs_fmri('ADNI_longitudinal_rs_fmri_DARTEL',
                                          'resampled*.nii')
Python
def load_adni_longitudinal_rs_fmri(dirname='ADNI_longitudinal_rs_fmri',
                                   prefix='wr*.nii'):
    """ Returns paths of ADNI rs-fMRI """
    # get file paths and description
    images, subject_paths, description = _get_subjects_and_description(
        base_dir=dirname, prefix='I[0-9]*')
    images = np.array(images)
    # get func files
    func_files = list(map(lambda x: _glob_subject_img(
        x, suffix='func/' + prefix, first_img=True), subject_paths))
    func_files = np.array(func_files)
    # get motion files
    motions = list(map(lambda x: _glob_subject_img(
        x, suffix='func/' + 'rp_*.txt', first_img=True), subject_paths))
    # get phenotype from csv
    dx = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                  'DXSUM_PDXCONV_ADNIALL.csv'))
    roster = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                      'ROSTER.csv'))
    df = description[description['Image_ID'].isin(images)]
    df = df.sort_values(by='Image_ID')
    dx_group = np.array(df['DX_Group'])
    subjects = np.array(df['Subject_ID'])
    exams = np.array(df['EXAM_DATE'])
    exams = [date(int(e[:4]), int(e[5:7]), int(e[8:])) for e in exams]

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _get_ridsfmri(subjects):
        return [_ptid_to_rid(s, roster) for s in subjects]
    rids = np.array(memory.cache(_get_ridsfmri)(subjects))

    def _get_examdatesfmri(rids):
        return [_get_dx(rids[i], dx, exams[i], viscode=None, return_code=True)
                for i in range(len(rids))]
    exam_dates = np.array(memory.cache(_get_examdatesfmri)(rids))

    def _get_viscodesfmri(rids):
        return [_get_vcodes(rids[i], str(exam_dates[i]), dx)
                for i in range(len(rids))]
    viscodes = np.array(memory.cache(_get_viscodesfmri)(rids))
    vcodes, vcodes2 = viscodes[:, 0], viscodes[:, 1]

    return Bunch(func=func_files, dx_group=dx_group, exam_codes=vcodes,
                 exam_dates=exam_dates, exam_codes2=vcodes2,
                 motion=motions, subjects=subjects, images=images)
Python
def load_adni_rs_fmri():
    """ Returns paths of ADNI resting-state fMRI """
    # get file paths and description
    subjects, subject_paths, description = _get_subjects_and_description(
        base_dir='ADNI_baseline_rs_fmri_mri', prefix='s[0-9]*')
    # get the correct subject_id
    subjects = [s[1:] for s in subjects]
    # get func files
    func_files = list(map(lambda x: _glob_subject_img(
        x, suffix='func/swr*.nii', first_img=True), subject_paths))
    # get phenotype from csv
    df = description[description['Subject_ID'].isin(subjects)]
    dx_group = np.array(df['DX_Group_x'])
    mmscores = np.array(df['MMSCORE'])
    return Bunch(func=func_files, dx_group=dx_group,
                 mmscores=mmscores, subjects=subjects)
Python
def load_adni_longitudinal_av45_pet():
    """Returns paths of longitudinal ADNI AV45-PET """
    # get file paths and description
    (subjects, subject_paths,
     description) = _get_subjects_and_description(base_dir='ADNI_av45_pet',
                                                  prefix='I[0-9]*')
    # get pet files
    pet_files = list(map(lambda x: _glob_subject_img(
        x, suffix='pet/wr*.nii', first_img=False), subject_paths))
    idx = [0]
    pet_files_all = []
    for pet_file in pet_files:
        idx.append(idx[-1] + len(pet_file))
        pet_files_all.extend(pet_file)
    pet_files_all = np.array(pet_files_all)
    images = [os.path.split(pet_file)[-1].split('_')[-1][:-4]
              for pet_file in pet_files_all]
    images = np.array(images)
    # get phenotype from csv
    dx = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                  'DXSUM_PDXCONV_ADNIALL.csv'))
    roster = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                      'ROSTER.csv'))
    df = description[description['Image_ID'].isin(images)]
    dx_group_all = np.array(df['DX_Group'])
    subjects_all = np.array(df['Subject_ID'])
    ages = np.array(df['Age'])
    exams = np.array(df['Study_Date'])
    exams = list(map(lambda e: datetime.strptime(e, '%m/%d/%Y').date(), exams))

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _get_ridspet(subjects_all):
        return list(map(lambda s: _ptid_to_rid(s, roster), subjects_all))
    rids = memory.cache(_get_ridspet)(subjects_all)

    def _get_examdatespet(rids):
        return list(map(lambda i: _get_dx(
            rids[i], dx, exams[i], viscode=None, return_code=True),
            range(len(rids))))
    exam_dates = np.array(memory.cache(_get_examdatespet)(rids))

    def _get_viscodespet(rids):
        return list(map(lambda i: _get_vcodes(
            rids[i], str(exam_dates[i]), dx), range(len(rids))))
    viscodes = np.array(memory.cache(_get_viscodespet)(rids))
    if len(viscodes) > 0:
        vcodes, vcodes2 = viscodes[:, 0], viscodes[:, 1]
    else:
        vcodes, vcodes2 = None, None

    return Bunch(pet=pet_files_all, dx_group=dx_group_all, images=images,
                 ages=ages, subjects=subjects_all, exam_codes=vcodes,
                 exam_dates=exam_dates, exam_codes2=vcodes2)
Python
def load_adni_longitudinal_fdg_pet():
    """Returns paths of longitudinal ADNI FDG-PET """
    # get file paths and description
    (subjects, subject_paths, description) = _get_subjects_and_description(
        base_dir='ADNI_longitudinal_fdg_pet', prefix='[0-9]*')
    # get pet files
    pet_files = list(map(lambda x: _glob_subject_img(
        x, suffix='pet/wr*.nii', first_img=False), subject_paths))
    idx = [0]
    pet_files_all = []
    for pet_file in pet_files:
        idx.append(idx[-1] + len(pet_file))
        pet_files_all.extend(pet_file)
    pet_files_all = np.array(pet_files_all)
    images = [os.path.split(pet_file)[-1].split('_')[-1][:-4]
              for pet_file in pet_files_all]
    images = np.array(images)
    # get phenotype from csv
    dx = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                  'DXSUM_PDXCONV_ADNIALL.csv'))
    roster = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                      'ROSTER.csv'))
    df = description[description['Image_ID'].isin(images)]
    dx_group_all = np.array(df['DX_Group'])
    dx_conv_all = np.array(df['DX_Conv'])
    subjects_all = np.array(df['Subject_ID'])
    ages = np.array(df['Age'])
    exams = np.array(df['Exam_Date'])
    exams = list(map(lambda e: date(int(e[:4]), int(e[5:7]), int(e[8:])),
                     exams))

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _get_ridspet(subjects_all):
        return list(map(lambda s: _ptid_to_rid(s, roster), subjects_all))
    rids = memory.cache(_get_ridspet)(subjects_all)

    def _get_examdatespet(rids):
        return list(map(lambda i: _get_dx(
            rids[i], dx, exams[i], viscode=None, return_code=True),
            range(len(rids))))
    exam_dates = np.array(memory.cache(_get_examdatespet)(rids))

    def _get_viscodespet(rids):
        return list(map(lambda i: _get_vcodes(
            rids[i], str(exam_dates[i]), dx), range(len(rids))))
    viscodes = np.array(memory.cache(_get_viscodespet)(rids))
    vcodes, vcodes2 = viscodes[:, 0], viscodes[:, 1]

    return Bunch(pet=pet_files_all, dx_group=dx_group_all,
                 dx_conv=dx_conv_all, images=images, ages=ages,
                 subjects=subjects_all, exam_codes=vcodes,
                 exam_dates=exam_dates, exam_codes2=vcodes2)
Python
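The per-subject CSV lookups above are memoized on disk through joblib's Memory.cache. A minimal standalone sketch of that pattern, with an illustrative cache directory and a stand-in function rather than the loader's own helpers:

from joblib import Memory

# illustrative location, not the loader's CACHE_DIR/joblib/load_data_cache
memory = Memory('/tmp/joblib_demo', verbose=0)

def _expensive_lookup(values):
    # stands in for the per-subject _ptid_to_rid / _get_dx lookups
    return [v * 2 for v in values]

cached_lookup = memory.cache(_expensive_lookup)
print(cached_lookup([1, 2, 3]))  # computed and written to the cache
print(cached_lookup([1, 2, 3]))  # replayed from the on-disk cache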
def load_adni_baseline_rs_fmri():
    """Returns paths of ADNI rs-fMRI
    """
    # get file paths and description
    (subjects, subject_paths, description) = _get_subjects_and_description(
        base_dir='ADNI_baseline_rs_fmri', prefix='[0-9]*')

    # get func files
    func_files = list(map(lambda x: _glob_subject_img(
        x, suffix='func/wr*.nii', first_img=True), subject_paths))

    # get phenotype from csv
    df = description[description['Subject_ID'].isin(subjects)]
    dx_group = np.array(df['DX_Group'])

    return Bunch(func=func_files, dx_group=dx_group, subjects=subjects)
def load_adni_rs_fmri_conn(filename):
    """Returns paths of ADNI rs-fMRI processed connectivity
    for a given npy file with shape: n_subjects x n_voxels x n_rois
    """
    FEAT_DIR = _get_data_base_dir('features')
    conn_file = os.path.join(FEAT_DIR, 'smooth_preproc', filename)
    if not os.path.isfile(conn_file):
        raise OSError('Connectivity file not found ...')
    dataset = load_adni_petmr()
    subj_list = dataset['subjects']

    return Bunch(fmri_data=conn_file,
                 dx_group=np.array(dataset['dx_group']),
                 mmscores=np.array(dataset['mmscores']),
                 subjects=subj_list)
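Hypothetical usage of this loader — the filename below is made up for illustration, and the .npy file must already exist under features/smooth_preproc:

conn = load_adni_rs_fmri_conn('fmri_conn_68rois.npy')  # hypothetical filename
data = np.load(conn['fmri_data'])  # shape: (n_subjects, n_voxels, n_rois)
print(data.shape, conn['dx_group'][:5])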
def load_adni_fdg_pet():
    """Returns paths of ADNI baseline FDG-PET
    """
    # get file paths and description
    subjects, subject_paths, description = _get_subjects_and_description(
        base_dir='ADNI_baseline_fdg_pet', prefix='s[0-9]*')

    # get the correct subject_id
    subjects = [s[1:] for s in subjects]

    # get pet files
    pet_files = list(map(lambda x: _glob_subject_img(
        x, suffix='pet/w*.nii', first_img=True), subject_paths))

    # get phenotype from csv
    df = description[description['Subject_ID'].isin(subjects)]
    dx_group = np.array(df['DX_Group'])
    mmscores = np.array(df['MMSCORE'])

    return Bunch(pet=pet_files, dx_group=dx_group,
                 mmscores=mmscores, subjects=subjects)
def load_adni_fdg_pet_diff():
    """Returns paths of the diff between PET and fMRI datasets
    """
    pet_dataset = load_adni_fdg_pet()
    fmri_dataset = load_adni_rs_fmri()
    remaining_subjects = np.setdiff1d(pet_dataset['subjects'],
                                      fmri_dataset['subjects'])
    pet_idx = []
    for pet_subject in remaining_subjects:
        pet_idx.append(
            np.where(np.array(pet_dataset['subjects']) == pet_subject)[0][0])
    pet_idx = np.array(pet_idx, dtype=np.intp)
    pet_groups = np.array(pet_dataset['dx_group'])
    pet_groups = pet_groups[pet_idx]
    pet_mmscores = np.array(pet_dataset['mmscores'])
    pet_mmscores = pet_mmscores[pet_idx]
    pet_files = np.array(pet_dataset['pet'])[pet_idx]

    return Bunch(pet=pet_files, dx_group=pet_groups,
                 mmscores=pet_mmscores, subjects=remaining_subjects)
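The subject-alignment idiom used above, in isolation: np.setdiff1d yields the PET-only subjects, and np.where maps each one back to its row index in the PET arrays. A self-contained numpy sketch with made-up IDs:

import numpy as np

pet_subjects = np.array(['s01', 's02', 's03', 's04'])
fmri_subjects = np.array(['s02', 's04'])

remaining = np.setdiff1d(pet_subjects, fmri_subjects)  # ['s01' 's03']
idx = np.array([np.where(pet_subjects == s)[0][0] for s in remaining],
               dtype=np.intp)
print(idx)  # [0 2] -- rows of the PET-only subjects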
def load_adni_petmr():
    """Returns paths of the intersection between PET and FMRI datasets
    """
    pet_dataset = load_adni_fdg_pet()
    fmri_dataset = load_adni_rs_fmri()
    petmr_subjects = np.intersect1d(pet_dataset['subjects'],
                                    fmri_dataset['subjects'],
                                    assume_unique=True)
    petmr_idx = []
    mrpet_idx = []
    for petmr_subject in petmr_subjects:
        petmr_idx.append(
            np.where(
                np.array(pet_dataset['subjects']) == petmr_subject)[0][0])
        mrpet_idx.append(
            np.where(
                np.array(fmri_dataset['subjects']) == petmr_subject)[0][0])
    petmr_idx = np.array(petmr_idx, dtype=np.intp)
    mrpet_idx = np.array(mrpet_idx, dtype=np.intp)
    pet_groups = np.array(pet_dataset['dx_group'])
    petmr_groups = pet_groups[petmr_idx]
    pet_mmscores = np.array(pet_dataset['mmscores'])
    petmr_mmscores = pet_mmscores[petmr_idx]
    func_files = np.array(fmri_dataset['func'])[mrpet_idx]
    pet_files = np.array(pet_dataset['pet'])[petmr_idx]

    return Bunch(func=func_files, pet=pet_files, dx_group=petmr_groups,
                 mmscores=petmr_mmscores, subjects=petmr_subjects)
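Because both modalities are reindexed with the same subject order, the i-th entries of func, pet, dx_group and mmscores all describe the same subject. Hypothetical usage, assuming the ADNI directory layout the loaders expect is in place:

petmr = load_adni_petmr()
for func_img, pet_img, dx in zip(petmr['func'], petmr['pet'],
                                 petmr['dx_group']):
    print(dx, func_img, pet_img)  # one aligned (fMRI, PET) pair per subject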
def load_atlas(atlas_name):
    """Returns the path of the selected atlas.

    atlas_name values are: msdl, harvard_oxford, juelich, julich,
    mayo, canica, canica141, tvmsdl, kmeans
    """
    CACHE_DIR = _get_cache_base_dir()
    if atlas_name == 'msdl':
        from nilearn.datasets import load_atlas_msdl
        atlas = load_atlas_msdl()['maps']
    elif atlas_name == 'harvard_oxford':
        atlas = os.path.join(CACHE_DIR, 'atlas',
                             'HarvardOxford-cortl-maxprob-thr0-2mm.nii.gz')
    elif atlas_name == 'juelich':
        atlas = os.path.join(CACHE_DIR, 'atlas',
                             'Juelich-maxprob-thr0-2mm.nii.gz')
    elif atlas_name == 'julich':
        atlas = os.path.join(CACHE_DIR, 'atlas', 'Juelich-prob-2mm.nii.gz')
    elif atlas_name == 'mayo':
        atlas = os.path.join(CACHE_DIR, 'atlas', 'atlas_68_rois.nii.gz')
    elif atlas_name == 'canica':
        atlas = os.path.join(CACHE_DIR, 'atlas', 'atlas_canica_61_rois.nii.gz')
    elif atlas_name == 'canica141':
        atlas = os.path.join(CACHE_DIR, 'atlas',
                             'atlas_canica_141_rois.nii.gz')
    elif atlas_name == 'tvmsdl':
        atlas = os.path.join(CACHE_DIR, 'atlas', 'tvmsdl_abide.nii.gz')
    elif atlas_name == 'kmeans':
        atlas = os.path.join(CACHE_DIR, 'atlas', 'atlas_kmeans.nii.gz')
    else:
        raise OSError('Atlas not found!')
    return atlas
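Hypothetical usage; the corresponding atlas file must already sit under CACHE_DIR/atlas for the returned path to be usable:

atlas_path = load_atlas('harvard_oxford')
print(atlas_path)  # .../atlas/HarvardOxford-cortl-maxprob-thr0-2mm.nii.gz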